//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for OpenMP parallel construct, that handles correct codegen
/// for captured expressions.
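/// Note: EmitPreInitStmt below returns true only for stand-alone parallel
/// directives; combined target and loop-bound-sharing forms skip pre-init
/// emission in this scope (presumably so it is not emitted twice for combined
/// constructs).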
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct, that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives, that supports capturing
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(
                          CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit init, __range and __end variables for C++ range loops.
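    // Walk down getCollapsedNumber() levels of the associated loop nest
    // (including imperfectly nested loops); only range-based for loops need
    // their implicit init/__range/__end statements emitted here.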
    const Stmt *Body =
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
          Body, /*TryImperfectlyNestedLoops=*/true);
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
        if (const Stmt *Init = CXXFor->getInit())
          CGF.EmitStmt(Init);
        CGF.EmitStmt(CXXFor->getRangeStmt());
        CGF.EmitStmt(CXXFor->getEndStmt());
        Body = CXXFor->getBody();
      }
    }
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

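// Forward declaration for use by the directive emitters below; the definition
// appears later in this translation unit.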
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
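  // The outlined function takes the captured-decl parameters that precede the
  // context parameter, then one argument per captured field ('this', a
  // captured variable, or a VLA size), then the remaining captured-decl
  // parameters.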
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted to
    // uintptr. This is necessary given that the runtime library is only able to
    // deal with pointers. We can pass in the same way the VLA type sizes to the
    // outlined function.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ?
                        FO.Loc
                        : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
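  // When debug info is required, the body is emitted into a "<helper>_debug__"
  // function that keeps the original parameter types, and a wrapper with the
  // uintptr-cast signature expected by the OpenMP runtime is emitted below to
  // forward its arguments to it.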
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
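  // The copy is emitted as an explicit element loop: compute the flattened
  // element count, then advance source/destination element pointers through
  // PHI nodes and let CopyGen emit the per-element copy.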
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI =
      Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization, it is done in
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VD, C, OrigVD]() {
                if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copying it back to the original
        // variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const Expr *Ref : C->varlists()) {
      Shareds.emplace_back(Ref);
      Privates.emplace_back(*IPriv);
      ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto ILHS = LHSs.begin();
  auto IRHS = RHSs.begin();
  auto IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
/// 'distribute parallel for'
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  if (CGF.getLangOpts().OpenMP < 50)
    return;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  // Privates need not be analyzed since they are not captured at all.
  // Task reductions may be skipped - tasks are ignored.
  // Firstprivates do not return a value but may be passed by reference - no
  // need to check for updated lastprivate conditional.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    }
  }
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
      CGF, S, PrivateDecls);
}

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
  // lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    // Check if we have any if clause associated with the directive.
1431 llvm::Value *IfCond = nullptr;
1432 if (const auto *C = S.getSingleClause<OMPIfClause>())
1433 IfCond = EmitScalarExpr(C->getCondition(),
1434 /*IgnoreResultAssign=*/true);
1435 
1436 llvm::Value *NumThreads = nullptr;
1437 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1438 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1439 /*IgnoreResultAssign=*/true);
1440 
1441 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1442 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1443 ProcBind = ProcBindClause->getProcBindKind();
1444 
1445 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1446 
1447 // The cleanup callback that finalizes all variables at the given location,
1448 // and thus calls destructors etc.
1449 auto FiniCB = [this](InsertPointTy IP) {
1450 CGBuilderTy::InsertPointGuard IPG(Builder);
1451 assert(IP.getBlock()->end() != IP.getPoint() &&
1452 "OpenMP IR Builder should cause terminated block!");
1453 llvm::BasicBlock *IPBB = IP.getBlock();
1454 llvm::BasicBlock *DestBB = IPBB->splitBasicBlock(IP.getPoint());
1455 IPBB->getTerminator()->eraseFromParent();
1456 Builder.SetInsertPoint(IPBB);
1457 CodeGenFunction::JumpDest Dest = getJumpDestInCurrentScope(DestBB);
1458 EmitBranchThroughCleanup(Dest);
1459 };
1460 
1461 // Privatization callback that performs appropriate action for
1462 // shared/private/firstprivate/lastprivate/copyin/... variables.
1463 //
1464 // TODO: This defaults to shared right now.
1465 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1466 llvm::Value &Val, llvm::Value *&ReplVal) {
1467 // The next line is appropriate only for variables (Val) with the
1468 // data-sharing attribute "shared".
1469 ReplVal = &Val;
1470 
1471 return CodeGenIP;
1472 };
1473 
1474 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1475 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1476 
1477 auto BodyGenCB = [ParallelRegionBodyStmt,
1478 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1479 llvm::BasicBlock &ContinuationBB) {
1480 auto OldAllocaIP = AllocaInsertPt;
1481 AllocaInsertPt = &*AllocaIP.getPoint();
1482 
1483 auto OldReturnBlock = ReturnBlock;
1484 ReturnBlock = getJumpDestInCurrentScope(&ContinuationBB);
1485 
1486 llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
1487 CodeGenIPBB->splitBasicBlock(CodeGenIP.getPoint());
1488 llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator();
1489 CodeGenIPBBTI->removeFromParent();
1490 
1491 Builder.SetInsertPoint(CodeGenIPBB);
1492 
1493 EmitStmt(ParallelRegionBodyStmt);
1494 
1495 Builder.Insert(CodeGenIPBBTI);
1496 
1497 AllocaInsertPt = OldAllocaIP;
1498 ReturnBlock = OldReturnBlock;
1499 };
1500 
1501 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1502 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1503 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
1504 FiniCB, IfCond, NumThreads,
1505 ProcBind, S.hasCancel()));
1506 return;
1507 }
1508 
1509 // Emit parallel region as a standalone region.
1510 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1511 Action.Enter(CGF);
1512 OMPPrivateScope PrivateScope(CGF);
1513 bool Copyins = CGF.EmitOMPCopyinClause(S);
1514 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1515 if (Copyins) {
1516 // Emit implicit barrier to synchronize threads and avoid data races on
1517 // propagation of the master thread's values of threadprivate variables to
1518 // the local instances of those variables in all other implicit threads.
1519 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 1520 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 1521 /*ForceSimpleCall=*/true); 1522 } 1523 CGF.EmitOMPPrivateClause(S, PrivateScope); 1524 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 1525 (void)PrivateScope.Privatize(); 1526 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt()); 1527 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 1528 }; 1529 { 1530 auto LPCRegion = 1531 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 1532 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen, 1533 emitEmptyBoundParameters); 1534 emitPostUpdateForReductionClause(*this, S, 1535 [](CodeGenFunction &) { return nullptr; }); 1536 } 1537 // Check for outer lastprivate conditional update. 1538 checkForLastprivateConditionalUpdate(*this, S); 1539 } 1540 1541 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, 1542 int MaxLevel, int Level = 0) { 1543 assert(Level < MaxLevel && "Too deep lookup during loop body codegen."); 1544 const Stmt *SimplifiedS = S->IgnoreContainers(); 1545 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) { 1546 PrettyStackTraceLoc CrashInfo( 1547 CGF.getContext().getSourceManager(), CS->getLBracLoc(), 1548 "LLVM IR generation of compound statement ('{}')"); 1549 1550 // Keep track of the current cleanup stack depth, including debug scopes. 1551 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1552 for (const Stmt *CurStmt : CS->body()) 1553 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1554 return; 1555 } 1556 if (SimplifiedS == NextLoop) { 1557 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1558 S = For->getBody(); 1559 } else { 1560 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1561 "Expected canonical for loop or range-based for loop."); 1562 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1563 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1564 S = CXXFor->getBody(); 1565 } 1566 if (Level + 1 < MaxLevel) { 1567 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1568 S, /*TryImperfectlyNestedLoops=*/true); 1569 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1570 return; 1571 } 1572 } 1573 CGF.EmitStmt(S); 1574 } 1575 1576 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1577 JumpDest LoopExit) { 1578 RunCleanupsScope BodyScope(*this); 1579 // Update counters values on current iteration. 1580 for (const Expr *UE : D.updates()) 1581 EmitIgnoredExpr(UE); 1582 // Update the linear variables. 1583 // In distribute directives only loop counters may be marked as linear, no 1584 // need to generate the code for them. 1585 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1586 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1587 for (const Expr *UE : C->updates()) 1588 EmitIgnoredExpr(UE); 1589 } 1590 } 1591 1592 // On a continue in the body, jump to the end. 1593 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1594 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1595 for (const Expr *E : D.finals_conditions()) { 1596 if (!E) 1597 continue; 1598 // Check that loop counter in non-rectangular nest fits into the iteration 1599 // space. 1600 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1601 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1602 getProfileCount(D.getBody())); 1603 EmitBlock(NextBB); 1604 } 1605 // Emit loop variables for C++ range loops. 
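// E.g. for '#pragma omp simd collapse(2)' over nested range-based for loops,
// emitBody below emits the loop-variable declaration of each CXXForRangeStmt
// as it descends through the collapsed nest (illustrative note).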
1606 const Stmt *Body = 1607 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1608 // Emit loop body. 1609 emitBody(*this, Body, 1610 OMPLoopDirective::tryToFindNextInnerLoop( 1611 Body, /*TryImperfectlyNestedLoops=*/true), 1612 D.getCollapsedNumber()); 1613 1614 // The end (updates/cleanups). 1615 EmitBlock(Continue.getBlock()); 1616 BreakContinueStack.pop_back(); 1617 } 1618 1619 void CodeGenFunction::EmitOMPInnerLoop( 1620 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond, 1621 const Expr *IncExpr, 1622 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 1623 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 1624 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 1625 1626 // Start the loop with a block that tests the condition. 1627 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1628 EmitBlock(CondBlock); 1629 const SourceRange R = S.getSourceRange(); 1630 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1631 SourceLocToDebugLoc(R.getEnd())); 1632 1633 // If there are any cleanups between here and the loop-exit scope, 1634 // create a block to stage a loop exit along. 1635 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1636 if (RequiresCleanup) 1637 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1638 1639 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1640 1641 // Emit condition. 1642 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1643 if (ExitBlock != LoopExit.getBlock()) { 1644 EmitBlock(ExitBlock); 1645 EmitBranchThroughCleanup(LoopExit); 1646 } 1647 1648 EmitBlock(LoopBody); 1649 incrementProfileCounter(&S); 1650 1651 // Create a block for the increment. 1652 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1653 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1654 1655 BodyGen(*this); 1656 1657 // Emit "IV = IV + 1" and a back-edge to the condition block. 1658 EmitBlock(Continue.getBlock()); 1659 EmitIgnoredExpr(IncExpr); 1660 PostIncGen(*this); 1661 BreakContinueStack.pop_back(); 1662 EmitBranch(CondBlock); 1663 LoopStack.pop(); 1664 // Emit the fall-through block. 1665 EmitBlock(LoopExit.getBlock()); 1666 } 1667 1668 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1669 if (!HaveInsertPoint()) 1670 return false; 1671 // Emit inits for the linear variables. 1672 bool HasLinears = false; 1673 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1674 for (const Expr *Init : C->inits()) { 1675 HasLinears = true; 1676 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1677 if (const auto *Ref = 1678 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1679 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1680 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1681 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1682 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1683 VD->getInit()->getType(), VK_LValue, 1684 VD->getInit()->getExprLoc()); 1685 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1686 VD->getType()), 1687 /*capturedByInit=*/false); 1688 EmitAutoVarCleanups(Emission); 1689 } else { 1690 EmitVarDecl(*VD); 1691 } 1692 } 1693 // Emit the linear steps for the linear clauses. 1694 // If a step is not constant, it is pre-calculated before the loop. 
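// For example, with '#pragma omp simd linear(p : step)' where 'step' is a
// runtime value, the saved step variable is declared and initialized here so
// the loop body reuses a single evaluation of the step (illustrative note).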
1695 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1696 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1697 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1698 // Emit calculation of the linear step. 1699 EmitIgnoredExpr(CS); 1700 } 1701 } 1702 return HasLinears; 1703 } 1704 1705 void CodeGenFunction::EmitOMPLinearClauseFinal( 1706 const OMPLoopDirective &D, 1707 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1708 if (!HaveInsertPoint()) 1709 return; 1710 llvm::BasicBlock *DoneBB = nullptr; 1711 // Emit the final values of the linear variables. 1712 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1713 auto IC = C->varlist_begin(); 1714 for (const Expr *F : C->finals()) { 1715 if (!DoneBB) { 1716 if (llvm::Value *Cond = CondGen(*this)) { 1717 // If the first post-update expression is found, emit conditional 1718 // block if it was requested. 1719 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1720 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1721 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1722 EmitBlock(ThenBB); 1723 } 1724 } 1725 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1726 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1727 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1728 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1729 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1730 CodeGenFunction::OMPPrivateScope VarScope(*this); 1731 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1732 (void)VarScope.Privatize(); 1733 EmitIgnoredExpr(F); 1734 ++IC; 1735 } 1736 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1737 EmitIgnoredExpr(PostUpdate); 1738 } 1739 if (DoneBB) 1740 EmitBlock(DoneBB, /*IsFinished=*/true); 1741 } 1742 1743 static void emitAlignedClause(CodeGenFunction &CGF, 1744 const OMPExecutableDirective &D) { 1745 if (!CGF.HaveInsertPoint()) 1746 return; 1747 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1748 llvm::APInt ClauseAlignment(64, 0); 1749 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1750 auto *AlignmentCI = 1751 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1752 ClauseAlignment = AlignmentCI->getValue(); 1753 } 1754 for (const Expr *E : Clause->varlists()) { 1755 llvm::APInt Alignment(ClauseAlignment); 1756 if (Alignment == 0) { 1757 // OpenMP [2.8.1, Description] 1758 // If no optional parameter is specified, implementation-defined default 1759 // alignments for SIMD instructions on the target platforms are assumed. 
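// E.g. '#pragma omp simd aligned(p)' with no explicit alignment argument
// falls back to the target's default SIMD alignment for the pointee type,
// queried below via getOpenMPDefaultSimdAlign (illustrative note).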
1760 Alignment = 1761 CGF.getContext() 1762 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1763 E->getType()->getPointeeType())) 1764 .getQuantity(); 1765 } 1766 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1767 "alignment is not power of 2"); 1768 if (Alignment != 0) { 1769 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1770 CGF.EmitAlignmentAssumption( 1771 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1772 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1773 } 1774 } 1775 } 1776 } 1777 1778 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1779 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1780 if (!HaveInsertPoint()) 1781 return; 1782 auto I = S.private_counters().begin(); 1783 for (const Expr *E : S.counters()) { 1784 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1785 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1786 // Emit var without initialization. 1787 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1788 EmitAutoVarCleanups(VarEmission); 1789 LocalDeclMap.erase(PrivateVD); 1790 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1791 return VarEmission.getAllocatedAddress(); 1792 }); 1793 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1794 VD->hasGlobalStorage()) { 1795 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1796 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1797 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1798 E->getType(), VK_LValue, E->getExprLoc()); 1799 return EmitLValue(&DRE).getAddress(*this); 1800 }); 1801 } else { 1802 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1803 return VarEmission.getAllocatedAddress(); 1804 }); 1805 } 1806 ++I; 1807 } 1808 // Privatize extra loop counters used in loops for ordered(n) clauses. 1809 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 1810 if (!C->getNumForLoops()) 1811 continue; 1812 for (unsigned I = S.getCollapsedNumber(), 1813 E = C->getLoopNumIterations().size(); 1814 I < E; ++I) { 1815 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 1816 const auto *VD = cast<VarDecl>(DRE->getDecl()); 1817 // Override only those variables that can be captured to avoid re-emission 1818 // of the variables declared within the loops. 1819 if (DRE->refersToEnclosingVariableOrCapture()) { 1820 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 1821 return CreateMemTemp(DRE->getType(), VD->getName()); 1822 }); 1823 } 1824 } 1825 } 1826 } 1827 1828 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 1829 const Expr *Cond, llvm::BasicBlock *TrueBlock, 1830 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 1831 if (!CGF.HaveInsertPoint()) 1832 return; 1833 { 1834 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 1835 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 1836 (void)PreCondScope.Privatize(); 1837 // Get initial values of real counters. 1838 for (const Expr *I : S.inits()) { 1839 CGF.EmitIgnoredExpr(I); 1840 } 1841 } 1842 // Create temp loop control variables with their init values to support 1843 // non-rectangular loops. 
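// For example, in a non-rectangular nest such as
//   #pragma omp for collapse(2)
//   for (int i = 0; i < n; ++i)
//     for (int j = i; j < m; ++j) ...
// the inner bound depends on 'i', so temporary copies of the dependent
// counters are created before the precondition is evaluated (illustrative
// sketch).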
1844 CodeGenFunction::OMPMapVars PreCondVars; 1845 for (const Expr * E: S.dependent_counters()) { 1846 if (!E) 1847 continue; 1848 assert(!E->getType().getNonReferenceType()->isRecordType() && 1849 "dependent counter must not be an iterator."); 1850 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1851 Address CounterAddr = 1852 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 1853 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 1854 } 1855 (void)PreCondVars.apply(CGF); 1856 for (const Expr *E : S.dependent_inits()) { 1857 if (!E) 1858 continue; 1859 CGF.EmitIgnoredExpr(E); 1860 } 1861 // Check that loop is executed at least one time. 1862 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 1863 PreCondVars.restore(CGF); 1864 } 1865 1866 void CodeGenFunction::EmitOMPLinearClause( 1867 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 1868 if (!HaveInsertPoint()) 1869 return; 1870 llvm::DenseSet<const VarDecl *> SIMDLCVs; 1871 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 1872 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 1873 for (const Expr *C : LoopDirective->counters()) { 1874 SIMDLCVs.insert( 1875 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 1876 } 1877 } 1878 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1879 auto CurPrivate = C->privates().begin(); 1880 for (const Expr *E : C->varlists()) { 1881 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1882 const auto *PrivateVD = 1883 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 1884 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 1885 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 1886 // Emit private VarDecl with copy init. 1887 EmitVarDecl(*PrivateVD); 1888 return GetAddrOfLocalVar(PrivateVD); 1889 }); 1890 assert(IsRegistered && "linear var already registered as private"); 1891 // Silence the warning about unused variable. 1892 (void)IsRegistered; 1893 } else { 1894 EmitVarDecl(*PrivateVD); 1895 } 1896 ++CurPrivate; 1897 } 1898 } 1899 } 1900 1901 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 1902 const OMPExecutableDirective &D, 1903 bool IsMonotonic) { 1904 if (!CGF.HaveInsertPoint()) 1905 return; 1906 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 1907 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 1908 /*ignoreResult=*/true); 1909 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 1910 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 1911 // In presence of finite 'safelen', it may be unsafe to mark all 1912 // the memory instructions parallel, because loop-carried 1913 // dependences of 'safelen' iterations are possible. 1914 if (!IsMonotonic) 1915 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 1916 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 1917 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 1918 /*ignoreResult=*/true); 1919 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 1920 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 1921 // In presence of finite 'safelen', it may be unsafe to mark all 1922 // the memory instructions parallel, because loop-carried 1923 // dependences of 'safelen' iterations are possible. 
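// E.g. '#pragma omp simd safelen(8)' only promises independence within any
// window of 8 consecutive iterations, so the vector width is set above but
// the loop is not marked fully parallel below (illustrative example).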
1924 CGF.LoopStack.setParallel(/*Enable=*/false); 1925 } 1926 } 1927 1928 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 1929 bool IsMonotonic) { 1930 // Walk clauses and process safelen/lastprivate. 1931 LoopStack.setParallel(!IsMonotonic); 1932 LoopStack.setVectorizeEnable(); 1933 emitSimdlenSafelenClause(*this, D, IsMonotonic); 1934 } 1935 1936 void CodeGenFunction::EmitOMPSimdFinal( 1937 const OMPLoopDirective &D, 1938 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1939 if (!HaveInsertPoint()) 1940 return; 1941 llvm::BasicBlock *DoneBB = nullptr; 1942 auto IC = D.counters().begin(); 1943 auto IPC = D.private_counters().begin(); 1944 for (const Expr *F : D.finals()) { 1945 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 1946 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 1947 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 1948 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 1949 OrigVD->hasGlobalStorage() || CED) { 1950 if (!DoneBB) { 1951 if (llvm::Value *Cond = CondGen(*this)) { 1952 // If the first post-update expression is found, emit conditional 1953 // block if it was requested. 1954 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 1955 DoneBB = createBasicBlock(".omp.final.done"); 1956 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1957 EmitBlock(ThenBB); 1958 } 1959 } 1960 Address OrigAddr = Address::invalid(); 1961 if (CED) { 1962 OrigAddr = 1963 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 1964 } else { 1965 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 1966 /*RefersToEnclosingVariableOrCapture=*/false, 1967 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 1968 OrigAddr = EmitLValue(&DRE).getAddress(*this); 1969 } 1970 OMPPrivateScope VarScope(*this); 1971 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1972 (void)VarScope.Privatize(); 1973 EmitIgnoredExpr(F); 1974 } 1975 ++IC; 1976 ++IPC; 1977 } 1978 if (DoneBB) 1979 EmitBlock(DoneBB, /*IsFinished=*/true); 1980 } 1981 1982 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 1983 const OMPLoopDirective &S, 1984 CodeGenFunction::JumpDest LoopExit) { 1985 CGF.EmitOMPLoopBody(S, LoopExit); 1986 CGF.EmitStopPoint(&S); 1987 } 1988 1989 /// Emit a helper variable and return corresponding lvalue. 
1990 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 1991 const DeclRefExpr *Helper) { 1992 auto VDecl = cast<VarDecl>(Helper->getDecl()); 1993 CGF.EmitVarDecl(*VDecl); 1994 return CGF.EmitLValue(Helper); 1995 } 1996 1997 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 1998 const RegionCodeGenTy &SimdInitGen, 1999 const RegionCodeGenTy &BodyCodeGen) { 2000 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2001 PrePostActionTy &) { 2002 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2003 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2004 SimdInitGen(CGF); 2005 2006 BodyCodeGen(CGF); 2007 }; 2008 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2009 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2010 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2011 2012 BodyCodeGen(CGF); 2013 }; 2014 const Expr *IfCond = nullptr; 2015 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2016 if (CGF.getLangOpts().OpenMP >= 50 && 2017 (C->getNameModifier() == OMPD_unknown || 2018 C->getNameModifier() == OMPD_simd)) { 2019 IfCond = C->getCondition(); 2020 break; 2021 } 2022 } 2023 if (IfCond) { 2024 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2025 } else { 2026 RegionCodeGenTy ThenRCG(ThenGen); 2027 ThenRCG(CGF); 2028 } 2029 } 2030 2031 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2032 PrePostActionTy &Action) { 2033 Action.Enter(CGF); 2034 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2035 "Expected simd directive"); 2036 OMPLoopScope PreInitScope(CGF, S); 2037 // if (PreCond) { 2038 // for (IV in 0..LastIteration) BODY; 2039 // <Final counter/linear vars updates>; 2040 // } 2041 // 2042 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2043 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2044 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2045 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2046 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2047 } 2048 2049 // Emit: if (PreCond) - begin. 2050 // If the condition constant folds and can be elided, avoid emitting the 2051 // whole loop. 2052 bool CondConstant; 2053 llvm::BasicBlock *ContBlock = nullptr; 2054 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2055 if (!CondConstant) 2056 return; 2057 } else { 2058 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2059 ContBlock = CGF.createBasicBlock("simd.if.end"); 2060 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2061 CGF.getProfileCount(&S)); 2062 CGF.EmitBlock(ThenBlock); 2063 CGF.incrementProfileCounter(&S); 2064 } 2065 2066 // Emit the loop iteration variable. 2067 const Expr *IVExpr = S.getIterationVariable(); 2068 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2069 CGF.EmitVarDecl(*IVDecl); 2070 CGF.EmitIgnoredExpr(S.getInit()); 2071 2072 // Emit the iterations count variable. 2073 // If it is not a variable, Sema decided to calculate iterations count on 2074 // each iteration (e.g., it is foldable into a constant). 2075 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2076 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2077 // Emit calculation of the iterations count. 
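// E.g. for 'for (int i = 0; i < n; i += 2)' this computes roughly
// (n + 1) / 2 at run time when the count is not foldable to a constant
// (illustrative example).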
2078 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2079 } 2080 2081 emitAlignedClause(CGF, S); 2082 (void)CGF.EmitOMPLinearClauseInit(S); 2083 { 2084 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2085 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2086 CGF.EmitOMPLinearClause(S, LoopScope); 2087 CGF.EmitOMPPrivateClause(S, LoopScope); 2088 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2089 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2090 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2091 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2092 (void)LoopScope.Privatize(); 2093 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2094 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2095 2096 emitCommonSimdLoop( 2097 CGF, S, 2098 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2099 CGF.EmitOMPSimdInit(S); 2100 }, 2101 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2102 CGF.EmitOMPInnerLoop( 2103 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2104 [&S](CodeGenFunction &CGF) { 2105 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest()); 2106 CGF.EmitStopPoint(&S); 2107 }, 2108 [](CodeGenFunction &) {}); 2109 }); 2110 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2111 // Emit final copy of the lastprivate variables at the end of loops. 2112 if (HasLastprivateClause) 2113 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2114 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2115 emitPostUpdateForReductionClause(CGF, S, 2116 [](CodeGenFunction &) { return nullptr; }); 2117 } 2118 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2119 // Emit: if (PreCond) - end. 2120 if (ContBlock) { 2121 CGF.EmitBranch(ContBlock); 2122 CGF.EmitBlock(ContBlock, true); 2123 } 2124 } 2125 2126 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2127 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2128 emitOMPSimdRegion(CGF, S, Action); 2129 }; 2130 { 2131 auto LPCRegion = 2132 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2133 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2134 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2135 } 2136 // Check for outer lastprivate conditional update. 2137 checkForLastprivateConditionalUpdate(*this, S); 2138 } 2139 2140 void CodeGenFunction::EmitOMPOuterLoop( 2141 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2142 CodeGenFunction::OMPPrivateScope &LoopScope, 2143 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2144 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2145 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2146 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2147 2148 const Expr *IVExpr = S.getIterationVariable(); 2149 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2150 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2151 2152 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2153 2154 // Start the loop with a block that tests the condition. 
2155 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2156 EmitBlock(CondBlock); 2157 const SourceRange R = S.getSourceRange(); 2158 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2159 SourceLocToDebugLoc(R.getEnd())); 2160 2161 llvm::Value *BoolCondVal = nullptr; 2162 if (!DynamicOrOrdered) { 2163 // UB = min(UB, GlobalUB) or 2164 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2165 // 'distribute parallel for') 2166 EmitIgnoredExpr(LoopArgs.EUB); 2167 // IV = LB 2168 EmitIgnoredExpr(LoopArgs.Init); 2169 // IV < UB 2170 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2171 } else { 2172 BoolCondVal = 2173 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2174 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2175 } 2176 2177 // If there are any cleanups between here and the loop-exit scope, 2178 // create a block to stage a loop exit along. 2179 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2180 if (LoopScope.requiresCleanups()) 2181 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2182 2183 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2184 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2185 if (ExitBlock != LoopExit.getBlock()) { 2186 EmitBlock(ExitBlock); 2187 EmitBranchThroughCleanup(LoopExit); 2188 } 2189 EmitBlock(LoopBody); 2190 2191 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2192 // LB for loop condition and emitted it above). 2193 if (DynamicOrOrdered) 2194 EmitIgnoredExpr(LoopArgs.Init); 2195 2196 // Create a block for the increment. 2197 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2198 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2199 2200 emitCommonSimdLoop( 2201 *this, S, 2202 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2203 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2204 // with dynamic/guided scheduling and without ordered clause. 2205 if (!isOpenMPSimdDirective(S.getDirectiveKind())) 2206 CGF.LoopStack.setParallel(!IsMonotonic); 2207 else 2208 CGF.EmitOMPSimdInit(S, IsMonotonic); 2209 }, 2210 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2211 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2212 SourceLocation Loc = S.getBeginLoc(); 2213 // when 'distribute' is not combined with a 'for': 2214 // while (idx <= UB) { BODY; ++idx; } 2215 // when 'distribute' is combined with a 'for' 2216 // (e.g. 'distribute parallel for') 2217 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2218 CGF.EmitOMPInnerLoop( 2219 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2220 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2221 CodeGenLoop(CGF, S, LoopExit); 2222 }, 2223 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2224 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2225 }); 2226 }); 2227 2228 EmitBlock(Continue.getBlock()); 2229 BreakContinueStack.pop_back(); 2230 if (!DynamicOrOrdered) { 2231 // Emit "LB = LB + Stride", "UB = UB + Stride". 2232 EmitIgnoredExpr(LoopArgs.NextLB); 2233 EmitIgnoredExpr(LoopArgs.NextUB); 2234 } 2235 2236 EmitBranch(CondBlock); 2237 LoopStack.pop(); 2238 // Emit the fall-through block. 2239 EmitBlock(LoopExit.getBlock()); 2240 2241 // Tell the runtime we are done. 
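// For static schedules this typically lowers to a __kmpc_for_static_fini
// call; for dynamic/ordered schedules the dispatch runtime finishes the loop
// itself, so nothing is emitted (illustrative note, assuming the default
// libomp entry points).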
2242 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2243 if (!DynamicOrOrdered) 2244 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2245 S.getDirectiveKind()); 2246 }; 2247 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2248 } 2249 2250 void CodeGenFunction::EmitOMPForOuterLoop( 2251 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2252 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2253 const OMPLoopArguments &LoopArgs, 2254 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2255 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2256 2257 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2258 const bool DynamicOrOrdered = 2259 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2260 2261 assert((Ordered || 2262 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2263 LoopArgs.Chunk != nullptr)) && 2264 "static non-chunked schedule does not need outer loop"); 2265 2266 // Emit outer loop. 2267 // 2268 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2269 // When schedule(dynamic,chunk_size) is specified, the iterations are 2270 // distributed to threads in the team in chunks as the threads request them. 2271 // Each thread executes a chunk of iterations, then requests another chunk, 2272 // until no chunks remain to be distributed. Each chunk contains chunk_size 2273 // iterations, except for the last chunk to be distributed, which may have 2274 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2275 // 2276 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2277 // to threads in the team in chunks as the executing threads request them. 2278 // Each thread executes a chunk of iterations, then requests another chunk, 2279 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2280 // each chunk is proportional to the number of unassigned iterations divided 2281 // by the number of threads in the team, decreasing to 1. For a chunk_size 2282 // with value k (greater than 1), the size of each chunk is determined in the 2283 // same way, with the restriction that the chunks do not contain fewer than k 2284 // iterations (except for the last chunk to be assigned, which may have fewer 2285 // than k iterations). 2286 // 2287 // When schedule(auto) is specified, the decision regarding scheduling is 2288 // delegated to the compiler and/or runtime system. The programmer gives the 2289 // implementation the freedom to choose any possible mapping of iterations to 2290 // threads in the team. 2291 // 2292 // When schedule(runtime) is specified, the decision regarding scheduling is 2293 // deferred until run time, and the schedule and chunk size are taken from the 2294 // run-sched-var ICV. If the ICV is set to auto, the schedule is 2295 // implementation defined 2296 // 2297 // while(__kmpc_dispatch_next(&LB, &UB)) { 2298 // idx = LB; 2299 // while (idx <= UB) { BODY; ++idx; 2300 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 2301 // } // inner loop 2302 // } 2303 // 2304 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2305 // When schedule(static, chunk_size) is specified, iterations are divided into 2306 // chunks of size chunk_size, and the chunks are assigned to the threads in 2307 // the team in a round-robin fashion in the order of the thread number. 
2308 // 2309 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 2310 // while (idx <= UB) { BODY; ++idx; } // inner loop 2311 // LB = LB + ST; 2312 // UB = UB + ST; 2313 // } 2314 // 2315 2316 const Expr *IVExpr = S.getIterationVariable(); 2317 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2318 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2319 2320 if (DynamicOrOrdered) { 2321 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds = 2322 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 2323 llvm::Value *LBVal = DispatchBounds.first; 2324 llvm::Value *UBVal = DispatchBounds.second; 2325 CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal, 2326 LoopArgs.Chunk}; 2327 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize, 2328 IVSigned, Ordered, DipatchRTInputValues); 2329 } else { 2330 CGOpenMPRuntime::StaticRTInput StaticInit( 2331 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 2332 LoopArgs.ST, LoopArgs.Chunk); 2333 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(), 2334 ScheduleKind, StaticInit); 2335 } 2336 2337 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 2338 const unsigned IVSize, 2339 const bool IVSigned) { 2340 if (Ordered) { 2341 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 2342 IVSigned); 2343 } 2344 }; 2345 2346 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 2347 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 2348 OuterLoopArgs.IncExpr = S.getInc(); 2349 OuterLoopArgs.Init = S.getInit(); 2350 OuterLoopArgs.Cond = S.getCond(); 2351 OuterLoopArgs.NextLB = S.getNextLowerBound(); 2352 OuterLoopArgs.NextUB = S.getNextUpperBound(); 2353 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 2354 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 2355 } 2356 2357 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, 2358 const unsigned IVSize, const bool IVSigned) {} 2359 2360 void CodeGenFunction::EmitOMPDistributeOuterLoop( 2361 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 2362 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 2363 const CodeGenLoopTy &CodeGenLoopContent) { 2364 2365 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2366 2367 // Emit outer loop. 2368 // Same behavior as a OMPForOuterLoop, except that schedule cannot be 2369 // dynamic 2370 // 2371 2372 const Expr *IVExpr = S.getIterationVariable(); 2373 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2374 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2375 2376 CGOpenMPRuntime::StaticRTInput StaticInit( 2377 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB, 2378 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk); 2379 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit); 2380 2381 // for combined 'distribute' and 'for' the increment expression of distribute 2382 // is stored in DistInc. For 'distribute' alone, it is in Inc. 
2383 Expr *IncExpr; 2384 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) 2385 IncExpr = S.getDistInc(); 2386 else 2387 IncExpr = S.getInc(); 2388 2389 // this routine is shared by 'omp distribute parallel for' and 2390 // 'omp distribute': select the right EUB expression depending on the 2391 // directive 2392 OMPLoopArguments OuterLoopArgs; 2393 OuterLoopArgs.LB = LoopArgs.LB; 2394 OuterLoopArgs.UB = LoopArgs.UB; 2395 OuterLoopArgs.ST = LoopArgs.ST; 2396 OuterLoopArgs.IL = LoopArgs.IL; 2397 OuterLoopArgs.Chunk = LoopArgs.Chunk; 2398 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2399 ? S.getCombinedEnsureUpperBound() 2400 : S.getEnsureUpperBound(); 2401 OuterLoopArgs.IncExpr = IncExpr; 2402 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2403 ? S.getCombinedInit() 2404 : S.getInit(); 2405 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2406 ? S.getCombinedCond() 2407 : S.getCond(); 2408 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2409 ? S.getCombinedNextLowerBound() 2410 : S.getNextLowerBound(); 2411 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2412 ? S.getCombinedNextUpperBound() 2413 : S.getNextUpperBound(); 2414 2415 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S, 2416 LoopScope, OuterLoopArgs, CodeGenLoopContent, 2417 emitEmptyOrdered); 2418 } 2419 2420 static std::pair<LValue, LValue> 2421 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, 2422 const OMPExecutableDirective &S) { 2423 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2424 LValue LB = 2425 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2426 LValue UB = 2427 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2428 2429 // When composing 'distribute' with 'for' (e.g. as in 'distribute 2430 // parallel for') we need to use the 'distribute' 2431 // chunk lower and upper bounds rather than the whole loop iteration 2432 // space. These are parameters to the outlined function for 'parallel' 2433 // and we copy the bounds of the previous schedule into the 2434 // the current ones. 2435 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); 2436 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); 2437 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar( 2438 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc()); 2439 PrevLBVal = CGF.EmitScalarConversion( 2440 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), 2441 LS.getIterationVariable()->getType(), 2442 LS.getPrevLowerBoundVariable()->getExprLoc()); 2443 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar( 2444 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc()); 2445 PrevUBVal = CGF.EmitScalarConversion( 2446 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), 2447 LS.getIterationVariable()->getType(), 2448 LS.getPrevUpperBoundVariable()->getExprLoc()); 2449 2450 CGF.EmitStoreOfScalar(PrevLBVal, LB); 2451 CGF.EmitStoreOfScalar(PrevUBVal, UB); 2452 2453 return {LB, UB}; 2454 } 2455 2456 /// if the 'for' loop has a dispatch schedule (e.g. 
dynamic, guided) then 2457 /// we need to use the LB and UB expressions generated by the worksharing 2458 /// code generation support, whereas in non combined situations we would 2459 /// just emit 0 and the LastIteration expression 2460 /// This function is necessary due to the difference of the LB and UB 2461 /// types for the RT emission routines for 'for_static_init' and 2462 /// 'for_dispatch_init' 2463 static std::pair<llvm::Value *, llvm::Value *> 2464 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, 2465 const OMPExecutableDirective &S, 2466 Address LB, Address UB) { 2467 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2468 const Expr *IVExpr = LS.getIterationVariable(); 2469 // when implementing a dynamic schedule for a 'for' combined with a 2470 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop 2471 // is not normalized as each team only executes its own assigned 2472 // distribute chunk 2473 QualType IteratorTy = IVExpr->getType(); 2474 llvm::Value *LBVal = 2475 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2476 llvm::Value *UBVal = 2477 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2478 return {LBVal, UBVal}; 2479 } 2480 2481 static void emitDistributeParallelForDistributeInnerBoundParams( 2482 CodeGenFunction &CGF, const OMPExecutableDirective &S, 2483 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) { 2484 const auto &Dir = cast<OMPLoopDirective>(S); 2485 LValue LB = 2486 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable())); 2487 llvm::Value *LBCast = 2488 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)), 2489 CGF.SizeTy, /*isSigned=*/false); 2490 CapturedVars.push_back(LBCast); 2491 LValue UB = 2492 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable())); 2493 2494 llvm::Value *UBCast = 2495 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)), 2496 CGF.SizeTy, /*isSigned=*/false); 2497 CapturedVars.push_back(UBCast); 2498 } 2499 2500 static void 2501 emitInnerParallelForWhenCombined(CodeGenFunction &CGF, 2502 const OMPLoopDirective &S, 2503 CodeGenFunction::JumpDest LoopExit) { 2504 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, 2505 PrePostActionTy &Action) { 2506 Action.Enter(CGF); 2507 bool HasCancel = false; 2508 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2509 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S)) 2510 HasCancel = D->hasCancel(); 2511 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S)) 2512 HasCancel = D->hasCancel(); 2513 else if (const auto *D = 2514 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2515 HasCancel = D->hasCancel(); 2516 } 2517 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2518 HasCancel); 2519 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2520 emitDistributeParallelForInnerBounds, 2521 emitDistributeParallelForDispatchBounds); 2522 }; 2523 2524 emitCommonOMPParallelDirective( 2525 CGF, S, 2526 isOpenMPSimdDirective(S.getDirectiveKind()) ? 
OMPD_for_simd : OMPD_for, 2527 CGInlinedWorksharingLoop, 2528 emitDistributeParallelForDistributeInnerBoundParams); 2529 } 2530 2531 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2532 const OMPDistributeParallelForDirective &S) { 2533 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2534 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2535 S.getDistInc()); 2536 }; 2537 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2538 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2539 } 2540 2541 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2542 const OMPDistributeParallelForSimdDirective &S) { 2543 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2544 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2545 S.getDistInc()); 2546 }; 2547 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2548 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2549 } 2550 2551 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2552 const OMPDistributeSimdDirective &S) { 2553 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2554 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2555 }; 2556 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2557 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2558 } 2559 2560 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2561 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2562 // Emit SPMD target parallel for region as a standalone region. 2563 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2564 emitOMPSimdRegion(CGF, S, Action); 2565 }; 2566 llvm::Function *Fn; 2567 llvm::Constant *Addr; 2568 // Emit target region as a standalone region. 2569 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 2570 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 2571 assert(Fn && Addr && "Target device function emission failed."); 2572 } 2573 2574 void CodeGenFunction::EmitOMPTargetSimdDirective( 2575 const OMPTargetSimdDirective &S) { 2576 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2577 emitOMPSimdRegion(CGF, S, Action); 2578 }; 2579 emitCommonOMPTargetDirective(*this, S, CodeGen); 2580 } 2581 2582 namespace { 2583 struct ScheduleKindModifiersTy { 2584 OpenMPScheduleClauseKind Kind; 2585 OpenMPScheduleClauseModifier M1; 2586 OpenMPScheduleClauseModifier M2; 2587 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2588 OpenMPScheduleClauseModifier M1, 2589 OpenMPScheduleClauseModifier M2) 2590 : Kind(Kind), M1(M1), M2(M2) {} 2591 }; 2592 } // namespace 2593 2594 bool CodeGenFunction::EmitOMPWorksharingLoop( 2595 const OMPLoopDirective &S, Expr *EUB, 2596 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2597 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2598 // Emit the loop iteration variable. 2599 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2600 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2601 EmitVarDecl(*IVDecl); 2602 2603 // Emit the iterations count variable. 2604 // If it is not a variable, Sema decided to calculate iterations count on each 2605 // iteration (e.g., it is foldable into a constant). 2606 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2607 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2608 // Emit calculation of the iterations count. 
2609 EmitIgnoredExpr(S.getCalcLastIteration()); 2610 } 2611 2612 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2613 2614 bool HasLastprivateClause; 2615 // Check pre-condition. 2616 { 2617 OMPLoopScope PreInitScope(*this, S); 2618 // Skip the entire loop if we don't meet the precondition. 2619 // If the condition constant folds and can be elided, avoid emitting the 2620 // whole loop. 2621 bool CondConstant; 2622 llvm::BasicBlock *ContBlock = nullptr; 2623 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2624 if (!CondConstant) 2625 return false; 2626 } else { 2627 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2628 ContBlock = createBasicBlock("omp.precond.end"); 2629 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2630 getProfileCount(&S)); 2631 EmitBlock(ThenBlock); 2632 incrementProfileCounter(&S); 2633 } 2634 2635 RunCleanupsScope DoacrossCleanupScope(*this); 2636 bool Ordered = false; 2637 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2638 if (OrderedClause->getNumForLoops()) 2639 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2640 else 2641 Ordered = true; 2642 } 2643 2644 llvm::DenseSet<const Expr *> EmittedFinals; 2645 emitAlignedClause(*this, S); 2646 bool HasLinears = EmitOMPLinearClauseInit(S); 2647 // Emit helper vars inits. 2648 2649 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2650 LValue LB = Bounds.first; 2651 LValue UB = Bounds.second; 2652 LValue ST = 2653 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2654 LValue IL = 2655 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2656 2657 // Emit 'then' code. 2658 { 2659 OMPPrivateScope LoopScope(*this); 2660 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2661 // Emit implicit barrier to synchronize threads and avoid data races on 2662 // initialization of firstprivate variables and post-update of 2663 // lastprivate variables. 2664 CGM.getOpenMPRuntime().emitBarrierCall( 2665 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2666 /*ForceSimpleCall=*/true); 2667 } 2668 EmitOMPPrivateClause(S, LoopScope); 2669 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2670 *this, S, EmitLValue(S.getIterationVariable())); 2671 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2672 EmitOMPReductionClauseInit(S, LoopScope); 2673 EmitOMPPrivateLoopCounters(S, LoopScope); 2674 EmitOMPLinearClause(S, LoopScope); 2675 (void)LoopScope.Privatize(); 2676 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2677 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2678 2679 // Detect the loop schedule kind and chunk. 2680 const Expr *ChunkExpr = nullptr; 2681 OpenMPScheduleTy ScheduleKind; 2682 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 2683 ScheduleKind.Schedule = C->getScheduleKind(); 2684 ScheduleKind.M1 = C->getFirstScheduleModifier(); 2685 ScheduleKind.M2 = C->getSecondScheduleModifier(); 2686 ChunkExpr = C->getChunkSize(); 2687 } else { 2688 // Default behaviour for schedule clause. 
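// E.g. a plain '#pragma omp for' with no schedule clause lets the runtime/
// target pick; getDefaultScheduleAndChunk typically reports a static
// schedule with no chunk here (illustrative note).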
2689 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 2690 *this, S, ScheduleKind.Schedule, ChunkExpr); 2691 } 2692 bool HasChunkSizeOne = false; 2693 llvm::Value *Chunk = nullptr; 2694 if (ChunkExpr) { 2695 Chunk = EmitScalarExpr(ChunkExpr); 2696 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 2697 S.getIterationVariable()->getType(), 2698 S.getBeginLoc()); 2699 Expr::EvalResult Result; 2700 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 2701 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 2702 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 2703 } 2704 } 2705 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2706 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2707 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 2708 // If the static schedule kind is specified or if the ordered clause is 2709 // specified, and if no monotonic modifier is specified, the effect will 2710 // be as if the monotonic modifier was specified. 2711 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 2712 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 2713 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 2714 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 2715 /* Chunked */ Chunk != nullptr) || 2716 StaticChunkedOne) && 2717 !Ordered) { 2718 JumpDest LoopExit = 2719 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 2720 emitCommonSimdLoop( 2721 *this, S, 2722 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2723 if (isOpenMPSimdDirective(S.getDirectiveKind())) 2724 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 2725 }, 2726 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 2727 &S, ScheduleKind, LoopExit, 2728 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2729 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2730 // When no chunk_size is specified, the iteration space is divided 2731 // into chunks that are approximately equal in size, and at most 2732 // one chunk is distributed to each thread. Note that the size of 2733 // the chunks is unspecified in this case. 2734 CGOpenMPRuntime::StaticRTInput StaticInit( 2735 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 2736 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 2737 StaticChunkedOne ? Chunk : nullptr); 2738 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2739 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 2740 StaticInit); 2741 // UB = min(UB, GlobalUB); 2742 if (!StaticChunkedOne) 2743 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 2744 // IV = LB; 2745 CGF.EmitIgnoredExpr(S.getInit()); 2746 // For unchunked static schedule generate: 2747 // 2748 // while (idx <= UB) { 2749 // BODY; 2750 // ++idx; 2751 // } 2752 // 2753 // For static schedule with chunk one: 2754 // 2755 // while (IV <= PrevUB) { 2756 // BODY; 2757 // IV += ST; 2758 // } 2759 CGF.EmitOMPInnerLoop( 2760 S, LoopScope.requiresCleanups(), 2761 StaticChunkedOne ? S.getCombinedParForInDistCond() 2762 : S.getCond(), 2763 StaticChunkedOne ? S.getDistInc() : S.getInc(), 2764 [&S, LoopExit](CodeGenFunction &CGF) { 2765 CGF.EmitOMPLoopBody(S, LoopExit); 2766 CGF.EmitStopPoint(&S); 2767 }, 2768 [](CodeGenFunction &) {}); 2769 }); 2770 EmitBlock(LoopExit.getBlock()); 2771 // Tell the runtime we are done. 
2772 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2773 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2774 S.getDirectiveKind()); 2775 }; 2776 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2777 } else { 2778 const bool IsMonotonic = 2779 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static || 2780 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown || 2781 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 2782 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 2783 // Emit the outer loop, which requests its work chunk [LB..UB] from 2784 // runtime and runs the inner loop to process it. 2785 const OMPLoopArguments LoopArguments( 2786 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 2787 IL.getAddress(*this), Chunk, EUB); 2788 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2789 LoopArguments, CGDispatchBounds); 2790 } 2791 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2792 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 2793 return CGF.Builder.CreateIsNotNull( 2794 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2795 }); 2796 } 2797 EmitOMPReductionClauseFinal( 2798 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2799 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2800 : /*Parallel only*/ OMPD_parallel); 2801 // Emit post-update of the reduction variables if IsLastIter != 0. 2802 emitPostUpdateForReductionClause( 2803 *this, S, [IL, &S](CodeGenFunction &CGF) { 2804 return CGF.Builder.CreateIsNotNull( 2805 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2806 }); 2807 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2808 if (HasLastprivateClause) 2809 EmitOMPLastprivateClauseFinal( 2810 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2811 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 2812 } 2813 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 2814 return CGF.Builder.CreateIsNotNull( 2815 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2816 }); 2817 DoacrossCleanupScope.ForceCleanup(); 2818 // We're now done with the loop, so jump to the continuation block. 2819 if (ContBlock) { 2820 EmitBranch(ContBlock); 2821 EmitBlock(ContBlock, /*IsFinished=*/true); 2822 } 2823 } 2824 return HasLastprivateClause; 2825 } 2826 2827 /// The following two functions generate expressions for the loop lower 2828 /// and upper bounds in case of static and dynamic (dispatch) schedule 2829 /// of the associated 'for' or 'distribute' loop. 2830 static std::pair<LValue, LValue> 2831 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 2832 const auto &LS = cast<OMPLoopDirective>(S); 2833 LValue LB = 2834 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2835 LValue UB = 2836 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2837 return {LB, UB}; 2838 } 2839 2840 /// When dealing with dispatch schedules (e.g. 
dynamic, guided) we do not 2841 /// consider the lower and upper bound expressions generated by the 2842 /// worksharing loop support, but we use 0 and the iteration space size as 2843 /// constants 2844 static std::pair<llvm::Value *, llvm::Value *> 2845 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, 2846 Address LB, Address UB) { 2847 const auto &LS = cast<OMPLoopDirective>(S); 2848 const Expr *IVExpr = LS.getIterationVariable(); 2849 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType()); 2850 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0); 2851 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration()); 2852 return {LBVal, UBVal}; 2853 } 2854 2855 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 2856 bool HasLastprivates = false; 2857 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 2858 PrePostActionTy &) { 2859 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); 2860 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 2861 emitForLoopBounds, 2862 emitDispatchForLoopBounds); 2863 }; 2864 { 2865 auto LPCRegion = 2866 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2867 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2868 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 2869 S.hasCancel()); 2870 } 2871 2872 // Emit an implicit barrier at the end. 2873 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 2874 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 2875 // Check for outer lastprivate conditional update. 2876 checkForLastprivateConditionalUpdate(*this, S); 2877 } 2878 2879 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 2880 bool HasLastprivates = false; 2881 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 2882 PrePostActionTy &) { 2883 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 2884 emitForLoopBounds, 2885 emitDispatchForLoopBounds); 2886 }; 2887 { 2888 auto LPCRegion = 2889 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2890 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2891 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2892 } 2893 2894 // Emit an implicit barrier at the end. 2895 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 2896 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 2897 // Check for outer lastprivate conditional update. 2898 checkForLastprivateConditionalUpdate(*this, S); 2899 } 2900 2901 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 2902 const Twine &Name, 2903 llvm::Value *Init = nullptr) { 2904 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 2905 if (Init) 2906 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 2907 return LVal; 2908 } 2909 2910 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 2911 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 2912 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 2913 bool HasLastprivates = false; 2914 auto &&CodeGen = [&S, CapturedStmt, CS, 2915 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 2916 ASTContext &C = CGF.getContext(); 2917 QualType KmpInt32Ty = 2918 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 2919 // Emit helper vars inits. 
2920 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 2921 CGF.Builder.getInt32(0)); 2922 llvm::ConstantInt *GlobalUBVal = CS != nullptr 2923 ? CGF.Builder.getInt32(CS->size() - 1) 2924 : CGF.Builder.getInt32(0); 2925 LValue UB = 2926 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 2927 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 2928 CGF.Builder.getInt32(1)); 2929 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 2930 CGF.Builder.getInt32(0)); 2931 // Loop counter. 2932 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 2933 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 2934 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 2935 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 2936 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 2937 // Generate condition for loop. 2938 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, 2939 OK_Ordinary, S.getBeginLoc(), FPOptions()); 2940 // Increment for loop counter. 2941 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 2942 S.getBeginLoc(), true); 2943 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 2944 // Iterate through all sections and emit a switch construct: 2945 // switch (IV) { 2946 // case 0: 2947 // <SectionStmt[0]>; 2948 // break; 2949 // ... 2950 // case <NumSection> - 1: 2951 // <SectionStmt[<NumSection> - 1]>; 2952 // break; 2953 // } 2954 // .omp.sections.exit: 2955 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 2956 llvm::SwitchInst *SwitchStmt = 2957 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 2958 ExitBB, CS == nullptr ? 1 : CS->size()); 2959 if (CS) { 2960 unsigned CaseNumber = 0; 2961 for (const Stmt *SubStmt : CS->children()) { 2962 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 2963 CGF.EmitBlock(CaseBB); 2964 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 2965 CGF.EmitStmt(SubStmt); 2966 CGF.EmitBranch(ExitBB); 2967 ++CaseNumber; 2968 } 2969 } else { 2970 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 2971 CGF.EmitBlock(CaseBB); 2972 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 2973 CGF.EmitStmt(CapturedStmt); 2974 CGF.EmitBranch(ExitBB); 2975 } 2976 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 2977 }; 2978 2979 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2980 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 2981 // Emit implicit barrier to synchronize threads and avoid data races on 2982 // initialization of firstprivate variables and post-update of lastprivate 2983 // variables. 2984 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 2985 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2986 /*ForceSimpleCall=*/true); 2987 } 2988 CGF.EmitOMPPrivateClause(S, LoopScope); 2989 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 2990 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2991 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2992 (void)LoopScope.Privatize(); 2993 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2994 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2995 2996 // Emit static non-chunked loop. 
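    // Illustrative sketch only (assuming the default host runtime); a
    // directive such as
    //   #pragma omp sections
    //   { { S0; } { S1; } }
    // is lowered roughly as
    //   __kmpc_for_static_init_4(&loc, tid, /*schedtype=*/kmp_sch_static,
    //                            &il, &lb, &ub, &st, /*incr=*/1, /*chunk=*/1);
    //   ub = min(ub, 1);            // GlobalUB == NumSections - 1
    //   for (iv = lb; iv <= ub; ++iv)
    //     switch (iv) { case 0: S0; break; case 1: S1; break; }
    //   __kmpc_for_static_fini(&loc, tid);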
2997 OpenMPScheduleTy ScheduleKind; 2998 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 2999 CGOpenMPRuntime::StaticRTInput StaticInit( 3000 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3001 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3002 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3003 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3004 // UB = min(UB, GlobalUB); 3005 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3006 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3007 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3008 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3009 // IV = LB; 3010 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3011 // while (idx <= UB) { BODY; ++idx; } 3012 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen, 3013 [](CodeGenFunction &) {}); 3014 // Tell the runtime we are done. 3015 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3016 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3017 S.getDirectiveKind()); 3018 }; 3019 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3020 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3021 // Emit post-update of the reduction variables if IsLastIter != 0. 3022 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3023 return CGF.Builder.CreateIsNotNull( 3024 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3025 }); 3026 3027 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3028 if (HasLastprivates) 3029 CGF.EmitOMPLastprivateClauseFinal( 3030 S, /*NoFinals=*/false, 3031 CGF.Builder.CreateIsNotNull( 3032 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3033 }; 3034 3035 bool HasCancel = false; 3036 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3037 HasCancel = OSD->hasCancel(); 3038 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3039 HasCancel = OPSD->hasCancel(); 3040 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3041 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3042 HasCancel); 3043 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3044 // clause. Otherwise the barrier will be generated by the codegen for the 3045 // directive. 3046 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3047 // Emit implicit barrier to synchronize threads and avoid data races on 3048 // initialization of firstprivate variables. 3049 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3050 OMPD_unknown); 3051 } 3052 } 3053 3054 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3055 { 3056 auto LPCRegion = 3057 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3058 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3059 EmitSections(S); 3060 } 3061 // Emit an implicit barrier at the end. 3062 if (!S.getSingleClause<OMPNowaitClause>()) { 3063 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3064 OMPD_sections); 3065 } 3066 // Check for outer lastprivate conditional update. 
3067 checkForLastprivateConditionalUpdate(*this, S); 3068 } 3069 3070 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3071 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3072 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3073 }; 3074 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3075 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen, 3076 S.hasCancel()); 3077 } 3078 3079 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3080 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3081 llvm::SmallVector<const Expr *, 8> DestExprs; 3082 llvm::SmallVector<const Expr *, 8> SrcExprs; 3083 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3084 // Check if there are any 'copyprivate' clauses associated with this 3085 // 'single' construct. 3086 // Build a list of copyprivate variables along with helper expressions 3087 // (<source>, <destination>, <destination>=<source> expressions) 3088 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3089 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3090 DestExprs.append(C->destination_exprs().begin(), 3091 C->destination_exprs().end()); 3092 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3093 AssignmentOps.append(C->assignment_ops().begin(), 3094 C->assignment_ops().end()); 3095 } 3096 // Emit code for 'single' region along with 'copyprivate' clauses 3097 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3098 Action.Enter(CGF); 3099 OMPPrivateScope SingleScope(CGF); 3100 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3101 CGF.EmitOMPPrivateClause(S, SingleScope); 3102 (void)SingleScope.Privatize(); 3103 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3104 }; 3105 { 3106 auto LPCRegion = 3107 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3108 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3109 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3110 CopyprivateVars, DestExprs, 3111 SrcExprs, AssignmentOps); 3112 } 3113 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3114 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3115 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3116 CGM.getOpenMPRuntime().emitBarrierCall( 3117 *this, S.getBeginLoc(), 3118 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3119 } 3120 // Check for outer lastprivate conditional update. 
3121   checkForLastprivateConditionalUpdate(*this, S);
3122 }
3123
3124 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3125   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3126     Action.Enter(CGF);
3127     CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3128   };
3129   CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
3130 }
3131
3132 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
3133   OMPLexicalScope Scope(*this, S, OMPD_unknown);
3134   emitMaster(*this, S);
3135 }
3136
3137 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
3138   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3139     Action.Enter(CGF);
3140     CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
3141   };
3142   const Expr *Hint = nullptr;
3143   if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
3144     Hint = HintClause->getHint();
3145   OMPLexicalScope Scope(*this, S, OMPD_unknown);
3146   CGM.getOpenMPRuntime().emitCriticalRegion(*this,
3147                                             S.getDirectiveName().getAsString(),
3148                                             CodeGen, S.getBeginLoc(), Hint);
3149 }
3150
3151 void CodeGenFunction::EmitOMPParallelForDirective(
3152     const OMPParallelForDirective &S) {
3153   // Emit directive as a combined directive that consists of two implicit
3154   // directives: 'parallel' with 'for' directive.
3155   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3156     Action.Enter(CGF);
3157     OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
3158     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
3159                                emitDispatchForLoopBounds);
3160   };
3161   {
3162     auto LPCRegion =
3163         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3164     emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
3165                                    emitEmptyBoundParameters);
3166   }
3167   // Check for outer lastprivate conditional update.
3168   checkForLastprivateConditionalUpdate(*this, S);
3169 }
3170
3171 void CodeGenFunction::EmitOMPParallelForSimdDirective(
3172     const OMPParallelForSimdDirective &S) {
3173   // Emit directive as a combined directive that consists of two implicit
3174   // directives: 'parallel' with 'for simd' directive.
3175   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3176     Action.Enter(CGF);
3177     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
3178                                emitDispatchForLoopBounds);
3179   };
3180   {
3181     auto LPCRegion =
3182         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3183     emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
3184                                    emitEmptyBoundParameters);
3185   }
3186   // Check for outer lastprivate conditional update.
3187   checkForLastprivateConditionalUpdate(*this, S);
3188 }
3189
3190 void CodeGenFunction::EmitOMPParallelMasterDirective(
3191     const OMPParallelMasterDirective &S) {
3192   // Emit directive as a combined directive that consists of two implicit
3193   // directives: 'parallel' with 'master' directive.
3194   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3195     Action.Enter(CGF);
3196     OMPPrivateScope PrivateScope(CGF);
3197     bool Copyins = CGF.EmitOMPCopyinClause(S);
3198     (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3199     if (Copyins) {
3200       // Emit implicit barrier to synchronize threads and avoid data races on
3201       // propagation of the master thread's values of threadprivate variables to
3202       // the local instances of those variables in all other implicit threads.
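      // For example, given
      //   int tp;
      //   #pragma omp threadprivate(tp)
      //   #pragma omp parallel master copyin(tp)
      // every worker thread copies the master thread's value of 'tp' into its
      // own threadprivate copy; the barrier below keeps threads from entering
      // the region body before that copy-in has completed.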
3203 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3204 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3205 /*ForceSimpleCall=*/true); 3206 } 3207 CGF.EmitOMPPrivateClause(S, PrivateScope); 3208 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 3209 (void)PrivateScope.Privatize(); 3210 emitMaster(CGF, S); 3211 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3212 }; 3213 { 3214 auto LPCRegion = 3215 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3216 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen, 3217 emitEmptyBoundParameters); 3218 emitPostUpdateForReductionClause(*this, S, 3219 [](CodeGenFunction &) { return nullptr; }); 3220 } 3221 // Check for outer lastprivate conditional update. 3222 checkForLastprivateConditionalUpdate(*this, S); 3223 } 3224 3225 void CodeGenFunction::EmitOMPParallelSectionsDirective( 3226 const OMPParallelSectionsDirective &S) { 3227 // Emit directive as a combined directive that consists of two implicit 3228 // directives: 'parallel' with 'sections' directive. 3229 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3230 Action.Enter(CGF); 3231 CGF.EmitSections(S); 3232 }; 3233 { 3234 auto LPCRegion = 3235 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3236 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen, 3237 emitEmptyBoundParameters); 3238 } 3239 // Check for outer lastprivate conditional update. 3240 checkForLastprivateConditionalUpdate(*this, S); 3241 } 3242 3243 void CodeGenFunction::EmitOMPTaskBasedDirective( 3244 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, 3245 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, 3246 OMPTaskDataTy &Data) { 3247 // Emit outlined function for task construct. 3248 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion); 3249 auto I = CS->getCapturedDecl()->param_begin(); 3250 auto PartId = std::next(I); 3251 auto TaskT = std::next(I, 4); 3252 // Check if the task is final 3253 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 3254 // If the condition constant folds and can be elided, try to avoid emitting 3255 // the condition and the dead arm of the if/else. 3256 const Expr *Cond = Clause->getCondition(); 3257 bool CondConstant; 3258 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 3259 Data.Final.setInt(CondConstant); 3260 else 3261 Data.Final.setPointer(EvaluateExprAsBool(Cond)); 3262 } else { 3263 // By default the task is not final. 3264 Data.Final.setInt(/*IntVal=*/false); 3265 } 3266 // Check if the task has 'priority' clause. 3267 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 3268 const Expr *Prio = Clause->getPriority(); 3269 Data.Priority.setInt(/*IntVal=*/true); 3270 Data.Priority.setPointer(EmitScalarConversion( 3271 EmitScalarExpr(Prio), Prio->getType(), 3272 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 3273 Prio->getExprLoc())); 3274 } 3275 // The first function argument for tasks is a thread id, the second one is a 3276 // part id (0 for tied tasks, >=0 for untied task). 3277 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 3278 // Get list of private variables. 
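  // For a directive such as
  //   #pragma omp task private(a) firstprivate(b)
  // Sema provides, for every clause item, the original variable reference and
  // a generated private-copy declaration; they are collected here
  // (deduplicated by canonical decl) so that the outlined task function can
  // allocate and initialize the task-local copies.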
3279 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 3280 auto IRef = C->varlist_begin(); 3281 for (const Expr *IInit : C->private_copies()) { 3282 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3283 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3284 Data.PrivateVars.push_back(*IRef); 3285 Data.PrivateCopies.push_back(IInit); 3286 } 3287 ++IRef; 3288 } 3289 } 3290 EmittedAsPrivate.clear(); 3291 // Get list of firstprivate variables. 3292 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3293 auto IRef = C->varlist_begin(); 3294 auto IElemInitRef = C->inits().begin(); 3295 for (const Expr *IInit : C->private_copies()) { 3296 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3297 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3298 Data.FirstprivateVars.push_back(*IRef); 3299 Data.FirstprivateCopies.push_back(IInit); 3300 Data.FirstprivateInits.push_back(*IElemInitRef); 3301 } 3302 ++IRef; 3303 ++IElemInitRef; 3304 } 3305 } 3306 // Get list of lastprivate variables (for taskloops). 3307 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 3308 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 3309 auto IRef = C->varlist_begin(); 3310 auto ID = C->destination_exprs().begin(); 3311 for (const Expr *IInit : C->private_copies()) { 3312 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3313 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3314 Data.LastprivateVars.push_back(*IRef); 3315 Data.LastprivateCopies.push_back(IInit); 3316 } 3317 LastprivateDstsOrigs.insert( 3318 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 3319 cast<DeclRefExpr>(*IRef)}); 3320 ++IRef; 3321 ++ID; 3322 } 3323 } 3324 SmallVector<const Expr *, 4> LHSs; 3325 SmallVector<const Expr *, 4> RHSs; 3326 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3327 auto IPriv = C->privates().begin(); 3328 auto IRed = C->reduction_ops().begin(); 3329 auto ILHS = C->lhs_exprs().begin(); 3330 auto IRHS = C->rhs_exprs().begin(); 3331 for (const Expr *Ref : C->varlists()) { 3332 Data.ReductionVars.emplace_back(Ref); 3333 Data.ReductionCopies.emplace_back(*IPriv); 3334 Data.ReductionOps.emplace_back(*IRed); 3335 LHSs.emplace_back(*ILHS); 3336 RHSs.emplace_back(*IRHS); 3337 std::advance(IPriv, 1); 3338 std::advance(IRed, 1); 3339 std::advance(ILHS, 1); 3340 std::advance(IRHS, 1); 3341 } 3342 } 3343 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 3344 *this, S.getBeginLoc(), LHSs, RHSs, Data); 3345 // Build list of dependences. 3346 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) 3347 for (const Expr *IRef : C->varlists()) 3348 Data.Dependences.emplace_back(C->getDependencyKind(), IRef); 3349 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 3350 CapturedRegion](CodeGenFunction &CGF, 3351 PrePostActionTy &Action) { 3352 // Set proper addresses for generated private copies. 
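    // The outlined task entry receives a "copy function" among its captured
    // parameters (CopyFnParam below); given the task's block of privates it
    // reports the address of every generated private copy. It is called with
    // one out-pointer per private, and each captured VarDecl is then remapped
    // to the returned address so the task body transparently uses the
    // task-local copies.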
3353 OMPPrivateScope Scope(CGF); 3354 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 3355 !Data.LastprivateVars.empty()) { 3356 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 3357 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 3358 enum { PrivatesParam = 2, CopyFnParam = 3 }; 3359 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 3360 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 3361 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 3362 CS->getCapturedDecl()->getParam(PrivatesParam))); 3363 // Map privates. 3364 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 3365 llvm::SmallVector<llvm::Value *, 16> CallArgs; 3366 CallArgs.push_back(PrivatesPtr); 3367 for (const Expr *E : Data.PrivateVars) { 3368 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3369 Address PrivatePtr = CGF.CreateMemTemp( 3370 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 3371 PrivatePtrs.emplace_back(VD, PrivatePtr); 3372 CallArgs.push_back(PrivatePtr.getPointer()); 3373 } 3374 for (const Expr *E : Data.FirstprivateVars) { 3375 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3376 Address PrivatePtr = 3377 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3378 ".firstpriv.ptr.addr"); 3379 PrivatePtrs.emplace_back(VD, PrivatePtr); 3380 CallArgs.push_back(PrivatePtr.getPointer()); 3381 } 3382 for (const Expr *E : Data.LastprivateVars) { 3383 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3384 Address PrivatePtr = 3385 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3386 ".lastpriv.ptr.addr"); 3387 PrivatePtrs.emplace_back(VD, PrivatePtr); 3388 CallArgs.push_back(PrivatePtr.getPointer()); 3389 } 3390 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 3391 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 3392 for (const auto &Pair : LastprivateDstsOrigs) { 3393 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 3394 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 3395 /*RefersToEnclosingVariableOrCapture=*/ 3396 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 3397 Pair.second->getType(), VK_LValue, 3398 Pair.second->getExprLoc()); 3399 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 3400 return CGF.EmitLValue(&DRE).getAddress(CGF); 3401 }); 3402 } 3403 for (const auto &Pair : PrivatePtrs) { 3404 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3405 CGF.getContext().getDeclAlign(Pair.first)); 3406 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 3407 } 3408 } 3409 if (Data.Reductions) { 3410 OMPLexicalScope LexScope(CGF, S, CapturedRegion); 3411 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies, 3412 Data.ReductionOps); 3413 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad( 3414 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9))); 3415 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) { 3416 RedCG.emitSharedLValue(CGF, Cnt); 3417 RedCG.emitAggregateType(CGF, Cnt); 3418 // FIXME: This must removed once the runtime library is fixed. 3419 // Emit required threadprivate variables for 3420 // initializer/combiner/finalizer. 
3421       CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3422                                                           RedCG, Cnt);
3423       Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3424           CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3425       Replacement =
3426           Address(CGF.EmitScalarConversion(
3427                       Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3428                       CGF.getContext().getPointerType(
3429                           Data.ReductionCopies[Cnt]->getType()),
3430                       Data.ReductionCopies[Cnt]->getExprLoc()),
3431                   Replacement.getAlignment());
3432       Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3433       Scope.addPrivate(RedCG.getBaseDecl(Cnt),
3434                        [Replacement]() { return Replacement; });
3435     }
3436   }
3437   // Privatize all private variables except for in_reduction items.
3438   (void)Scope.Privatize();
3439   SmallVector<const Expr *, 4> InRedVars;
3440   SmallVector<const Expr *, 4> InRedPrivs;
3441   SmallVector<const Expr *, 4> InRedOps;
3442   SmallVector<const Expr *, 4> TaskgroupDescriptors;
3443   for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
3444     auto IPriv = C->privates().begin();
3445     auto IRed = C->reduction_ops().begin();
3446     auto ITD = C->taskgroup_descriptors().begin();
3447     for (const Expr *Ref : C->varlists()) {
3448       InRedVars.emplace_back(Ref);
3449       InRedPrivs.emplace_back(*IPriv);
3450       InRedOps.emplace_back(*IRed);
3451       TaskgroupDescriptors.emplace_back(*ITD);
3452       std::advance(IPriv, 1);
3453       std::advance(IRed, 1);
3454       std::advance(ITD, 1);
3455     }
3456   }
3457   // Privatize in_reduction items here, because taskgroup descriptors must be
3458   // privatized earlier.
3459   OMPPrivateScope InRedScope(CGF);
3460   if (!InRedVars.empty()) {
3461     ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
3462     for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
3463       RedCG.emitSharedLValue(CGF, Cnt);
3464       RedCG.emitAggregateType(CGF, Cnt);
3465       // The taskgroup descriptor variable is always implicitly firstprivate
3466       // and has already been privatized during processing of the firstprivates.
3467       // FIXME: This must be removed once the runtime library is fixed.
3468       // Emit required threadprivate variables for
3469       // initializer/combiner/finalizer.
3470 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 3471 RedCG, Cnt); 3472 llvm::Value *ReductionsPtr = 3473 CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]), 3474 TaskgroupDescriptors[Cnt]->getExprLoc()); 3475 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 3476 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 3477 Replacement = Address( 3478 CGF.EmitScalarConversion( 3479 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 3480 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 3481 InRedPrivs[Cnt]->getExprLoc()), 3482 Replacement.getAlignment()); 3483 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 3484 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), 3485 [Replacement]() { return Replacement; }); 3486 } 3487 } 3488 (void)InRedScope.Privatize(); 3489 3490 Action.Enter(CGF); 3491 BodyGen(CGF); 3492 }; 3493 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3494 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 3495 Data.NumberOfParts); 3496 OMPLexicalScope Scope(*this, S, llvm::None, 3497 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3498 !isOpenMPSimdDirective(S.getDirectiveKind())); 3499 TaskGen(*this, OutlinedFn, Data); 3500 } 3501 3502 static ImplicitParamDecl * 3503 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 3504 QualType Ty, CapturedDecl *CD, 3505 SourceLocation Loc) { 3506 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3507 ImplicitParamDecl::Other); 3508 auto *OrigRef = DeclRefExpr::Create( 3509 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 3510 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3511 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3512 ImplicitParamDecl::Other); 3513 auto *PrivateRef = DeclRefExpr::Create( 3514 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 3515 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3516 QualType ElemType = C.getBaseElementType(Ty); 3517 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 3518 ImplicitParamDecl::Other); 3519 auto *InitRef = DeclRefExpr::Create( 3520 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 3521 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 3522 PrivateVD->setInitStyle(VarDecl::CInit); 3523 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 3524 InitRef, /*BasePath=*/nullptr, 3525 VK_RValue)); 3526 Data.FirstprivateVars.emplace_back(OrigRef); 3527 Data.FirstprivateCopies.emplace_back(PrivateRef); 3528 Data.FirstprivateInits.emplace_back(InitRef); 3529 return OrigVD; 3530 } 3531 3532 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 3533 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 3534 OMPTargetDataInfo &InputInfo) { 3535 // Emit outlined function for task construct. 3536 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3537 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3538 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 3539 auto I = CS->getCapturedDecl()->param_begin(); 3540 auto PartId = std::next(I); 3541 auto TaskT = std::next(I, 4); 3542 OMPTaskDataTy Data; 3543 // The task is not final. 3544 Data.Final.setInt(/*IntVal=*/false); 3545 // Get list of firstprivate variables. 
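  // (Besides the explicit firstprivates collected below, the offloading
  // arguments - the arrays of base pointers, pointers and sizes - are added as
  // implicit firstprivates further down, so a deferred target task still finds
  // its mapping information.)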
3546 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3547 auto IRef = C->varlist_begin(); 3548 auto IElemInitRef = C->inits().begin(); 3549 for (auto *IInit : C->private_copies()) { 3550 Data.FirstprivateVars.push_back(*IRef); 3551 Data.FirstprivateCopies.push_back(IInit); 3552 Data.FirstprivateInits.push_back(*IElemInitRef); 3553 ++IRef; 3554 ++IElemInitRef; 3555 } 3556 } 3557 OMPPrivateScope TargetScope(*this); 3558 VarDecl *BPVD = nullptr; 3559 VarDecl *PVD = nullptr; 3560 VarDecl *SVD = nullptr; 3561 if (InputInfo.NumberOfTargetItems > 0) { 3562 auto *CD = CapturedDecl::Create( 3563 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 3564 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 3565 QualType BaseAndPointersType = getContext().getConstantArrayType( 3566 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 3567 /*IndexTypeQuals=*/0); 3568 BPVD = createImplicitFirstprivateForType( 3569 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 3570 PVD = createImplicitFirstprivateForType( 3571 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 3572 QualType SizesType = getContext().getConstantArrayType( 3573 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 3574 ArrSize, nullptr, ArrayType::Normal, 3575 /*IndexTypeQuals=*/0); 3576 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 3577 S.getBeginLoc()); 3578 TargetScope.addPrivate( 3579 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 3580 TargetScope.addPrivate(PVD, 3581 [&InputInfo]() { return InputInfo.PointersArray; }); 3582 TargetScope.addPrivate(SVD, 3583 [&InputInfo]() { return InputInfo.SizesArray; }); 3584 } 3585 (void)TargetScope.Privatize(); 3586 // Build list of dependences. 3587 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) 3588 for (const Expr *IRef : C->varlists()) 3589 Data.Dependences.emplace_back(C->getDependencyKind(), IRef); 3590 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, 3591 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 3592 // Set proper addresses for generated private copies. 3593 OMPPrivateScope Scope(CGF); 3594 if (!Data.FirstprivateVars.empty()) { 3595 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 3596 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 3597 enum { PrivatesParam = 2, CopyFnParam = 3 }; 3598 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 3599 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 3600 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 3601 CS->getCapturedDecl()->getParam(PrivatesParam))); 3602 // Map privates. 
3603 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 3604 llvm::SmallVector<llvm::Value *, 16> CallArgs; 3605 CallArgs.push_back(PrivatesPtr); 3606 for (const Expr *E : Data.FirstprivateVars) { 3607 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3608 Address PrivatePtr = 3609 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3610 ".firstpriv.ptr.addr"); 3611 PrivatePtrs.emplace_back(VD, PrivatePtr); 3612 CallArgs.push_back(PrivatePtr.getPointer()); 3613 } 3614 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 3615 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 3616 for (const auto &Pair : PrivatePtrs) { 3617 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3618 CGF.getContext().getDeclAlign(Pair.first)); 3619 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 3620 } 3621 } 3622 // Privatize all private variables except for in_reduction items. 3623 (void)Scope.Privatize(); 3624 if (InputInfo.NumberOfTargetItems > 0) { 3625 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 3626 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 3627 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 3628 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 3629 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 3630 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 3631 } 3632 3633 Action.Enter(CGF); 3634 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 3635 BodyGen(CGF); 3636 }; 3637 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3638 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 3639 Data.NumberOfParts); 3640 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 3641 IntegerLiteral IfCond(getContext(), TrueOrFalse, 3642 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 3643 SourceLocation()); 3644 3645 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 3646 SharedsTy, CapturedStruct, &IfCond, Data); 3647 } 3648 3649 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 3650 // Emit outlined function for task construct. 3651 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3652 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3653 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 3654 const Expr *IfCond = nullptr; 3655 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 3656 if (C->getNameModifier() == OMPD_unknown || 3657 C->getNameModifier() == OMPD_task) { 3658 IfCond = C->getCondition(); 3659 break; 3660 } 3661 } 3662 3663 OMPTaskDataTy Data; 3664 // Check if we should emit tied or untied task. 
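  // Tasks are tied by default; the 'untied' clause, e.g.
  //   #pragma omp task untied
  // clears Data.Tied, which is later reflected in the flags passed to the
  // runtime's task allocation entry point (__kmpc_omp_task_alloc with the
  // default host runtime).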
3665 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 3666 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 3667 CGF.EmitStmt(CS->getCapturedStmt()); 3668 }; 3669 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 3670 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 3671 const OMPTaskDataTy &Data) { 3672 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 3673 SharedsTy, CapturedStruct, IfCond, 3674 Data); 3675 }; 3676 auto LPCRegion = 3677 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3678 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 3679 } 3680 3681 void CodeGenFunction::EmitOMPTaskyieldDirective( 3682 const OMPTaskyieldDirective &S) { 3683 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 3684 } 3685 3686 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 3687 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 3688 } 3689 3690 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 3691 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 3692 } 3693 3694 void CodeGenFunction::EmitOMPTaskgroupDirective( 3695 const OMPTaskgroupDirective &S) { 3696 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3697 Action.Enter(CGF); 3698 if (const Expr *E = S.getReductionRef()) { 3699 SmallVector<const Expr *, 4> LHSs; 3700 SmallVector<const Expr *, 4> RHSs; 3701 OMPTaskDataTy Data; 3702 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 3703 auto IPriv = C->privates().begin(); 3704 auto IRed = C->reduction_ops().begin(); 3705 auto ILHS = C->lhs_exprs().begin(); 3706 auto IRHS = C->rhs_exprs().begin(); 3707 for (const Expr *Ref : C->varlists()) { 3708 Data.ReductionVars.emplace_back(Ref); 3709 Data.ReductionCopies.emplace_back(*IPriv); 3710 Data.ReductionOps.emplace_back(*IRed); 3711 LHSs.emplace_back(*ILHS); 3712 RHSs.emplace_back(*IRHS); 3713 std::advance(IPriv, 1); 3714 std::advance(IRed, 1); 3715 std::advance(ILHS, 1); 3716 std::advance(IRHS, 1); 3717 } 3718 } 3719 llvm::Value *ReductionDesc = 3720 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 3721 LHSs, RHSs, Data); 3722 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3723 CGF.EmitVarDecl(*VD); 3724 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 3725 /*Volatile=*/false, E->getType()); 3726 } 3727 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3728 }; 3729 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3730 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 3731 } 3732 3733 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 3734 CGM.getOpenMPRuntime().emitFlush( 3735 *this, 3736 [&S]() -> ArrayRef<const Expr *> { 3737 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 3738 return llvm::makeArrayRef(FlushClause->varlist_begin(), 3739 FlushClause->varlist_end()); 3740 return llvm::None; 3741 }(), 3742 S.getBeginLoc()); 3743 } 3744 3745 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 3746 const CodeGenLoopTy &CodeGenLoop, 3747 Expr *IncExpr) { 3748 // Emit the loop iteration variable. 3749 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3750 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3751 EmitVarDecl(*IVDecl); 3752 3753 // Emit the iterations count variable. 
3754 // If it is not a variable, Sema decided to calculate iterations count on each 3755 // iteration (e.g., it is foldable into a constant). 3756 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3757 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3758 // Emit calculation of the iterations count. 3759 EmitIgnoredExpr(S.getCalcLastIteration()); 3760 } 3761 3762 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 3763 3764 bool HasLastprivateClause = false; 3765 // Check pre-condition. 3766 { 3767 OMPLoopScope PreInitScope(*this, S); 3768 // Skip the entire loop if we don't meet the precondition. 3769 // If the condition constant folds and can be elided, avoid emitting the 3770 // whole loop. 3771 bool CondConstant; 3772 llvm::BasicBlock *ContBlock = nullptr; 3773 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3774 if (!CondConstant) 3775 return; 3776 } else { 3777 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 3778 ContBlock = createBasicBlock("omp.precond.end"); 3779 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3780 getProfileCount(&S)); 3781 EmitBlock(ThenBlock); 3782 incrementProfileCounter(&S); 3783 } 3784 3785 emitAlignedClause(*this, S); 3786 // Emit 'then' code. 3787 { 3788 // Emit helper vars inits. 3789 3790 LValue LB = EmitOMPHelperVar( 3791 *this, cast<DeclRefExpr>( 3792 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3793 ? S.getCombinedLowerBoundVariable() 3794 : S.getLowerBoundVariable()))); 3795 LValue UB = EmitOMPHelperVar( 3796 *this, cast<DeclRefExpr>( 3797 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3798 ? S.getCombinedUpperBoundVariable() 3799 : S.getUpperBoundVariable()))); 3800 LValue ST = 3801 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3802 LValue IL = 3803 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3804 3805 OMPPrivateScope LoopScope(*this); 3806 if (EmitOMPFirstprivateClause(S, LoopScope)) { 3807 // Emit implicit barrier to synchronize threads and avoid data races 3808 // on initialization of firstprivate variables and post-update of 3809 // lastprivate variables. 3810 CGM.getOpenMPRuntime().emitBarrierCall( 3811 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3812 /*ForceSimpleCall=*/true); 3813 } 3814 EmitOMPPrivateClause(S, LoopScope); 3815 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 3816 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3817 !isOpenMPTeamsDirective(S.getDirectiveKind())) 3818 EmitOMPReductionClauseInit(S, LoopScope); 3819 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3820 EmitOMPPrivateLoopCounters(S, LoopScope); 3821 (void)LoopScope.Privatize(); 3822 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3823 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3824 3825 // Detect the distribute schedule kind and chunk. 3826 llvm::Value *Chunk = nullptr; 3827 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 3828 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 3829 ScheduleKind = C->getDistScheduleKind(); 3830 if (const Expr *Ch = C->getChunkSize()) { 3831 Chunk = EmitScalarExpr(Ch); 3832 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 3833 S.getIterationVariable()->getType(), 3834 S.getBeginLoc()); 3835 } 3836 } else { 3837 // Default behaviour for dist_schedule clause. 
3838 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 3839 *this, S, ScheduleKind, Chunk); 3840 } 3841 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3842 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3843 3844 // OpenMP [2.10.8, distribute Construct, Description] 3845 // If dist_schedule is specified, kind must be static. If specified, 3846 // iterations are divided into chunks of size chunk_size, chunks are 3847 // assigned to the teams of the league in a round-robin fashion in the 3848 // order of the team number. When no chunk_size is specified, the 3849 // iteration space is divided into chunks that are approximately equal 3850 // in size, and at most one chunk is distributed to each team of the 3851 // league. The size of the chunks is unspecified in this case. 3852 bool StaticChunked = RT.isStaticChunked( 3853 ScheduleKind, /* Chunked */ Chunk != nullptr) && 3854 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3855 if (RT.isStaticNonchunked(ScheduleKind, 3856 /* Chunked */ Chunk != nullptr) || 3857 StaticChunked) { 3858 CGOpenMPRuntime::StaticRTInput StaticInit( 3859 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 3860 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3861 StaticChunked ? Chunk : nullptr); 3862 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 3863 StaticInit); 3864 JumpDest LoopExit = 3865 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3866 // UB = min(UB, GlobalUB); 3867 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3868 ? S.getCombinedEnsureUpperBound() 3869 : S.getEnsureUpperBound()); 3870 // IV = LB; 3871 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3872 ? S.getCombinedInit() 3873 : S.getInit()); 3874 3875 const Expr *Cond = 3876 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3877 ? S.getCombinedCond() 3878 : S.getCond(); 3879 3880 if (StaticChunked) 3881 Cond = S.getCombinedDistCond(); 3882 3883 // For static unchunked schedules generate: 3884 // 3885 // 1. For distribute alone, codegen 3886 // while (idx <= UB) { 3887 // BODY; 3888 // ++idx; 3889 // } 3890 // 3891 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 3892 // while (idx <= UB) { 3893 // <CodeGen rest of pragma>(LB, UB); 3894 // idx += ST; 3895 // } 3896 // 3897 // For static chunk one schedule generate: 3898 // 3899 // while (IV <= GlobalUB) { 3900 // <CodeGen rest of pragma>(LB, UB); 3901 // LB += ST; 3902 // UB += ST; 3903 // UB = min(UB, GlobalUB); 3904 // IV = LB; 3905 // } 3906 // 3907 emitCommonSimdLoop( 3908 *this, S, 3909 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3910 if (isOpenMPSimdDirective(S.getDirectiveKind())) 3911 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 3912 }, 3913 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 3914 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 3915 CGF.EmitOMPInnerLoop( 3916 S, LoopScope.requiresCleanups(), Cond, IncExpr, 3917 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 3918 CodeGenLoop(CGF, S, LoopExit); 3919 }, 3920 [&S, StaticChunked](CodeGenFunction &CGF) { 3921 if (StaticChunked) { 3922 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 3923 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 3924 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 3925 CGF.EmitIgnoredExpr(S.getCombinedInit()); 3926 } 3927 }); 3928 }); 3929 EmitBlock(LoopExit.getBlock()); 3930 // Tell the runtime we are done. 
3931 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 3932 } else { 3933 // Emit the outer loop, which requests its work chunk [LB..UB] from 3934 // runtime and runs the inner loop to process it. 3935 const OMPLoopArguments LoopArguments = { 3936 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3937 IL.getAddress(*this), Chunk}; 3938 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 3939 CodeGenLoop); 3940 } 3941 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3942 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 3943 return CGF.Builder.CreateIsNotNull( 3944 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3945 }); 3946 } 3947 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 3948 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3949 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 3950 EmitOMPReductionClauseFinal(S, OMPD_simd); 3951 // Emit post-update of the reduction variables if IsLastIter != 0. 3952 emitPostUpdateForReductionClause( 3953 *this, S, [IL, &S](CodeGenFunction &CGF) { 3954 return CGF.Builder.CreateIsNotNull( 3955 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3956 }); 3957 } 3958 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3959 if (HasLastprivateClause) { 3960 EmitOMPLastprivateClauseFinal( 3961 S, /*NoFinals=*/false, 3962 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 3963 } 3964 } 3965 3966 // We're now done with the loop, so jump to the continuation block. 3967 if (ContBlock) { 3968 EmitBranch(ContBlock); 3969 EmitBlock(ContBlock, true); 3970 } 3971 } 3972 } 3973 3974 void CodeGenFunction::EmitOMPDistributeDirective( 3975 const OMPDistributeDirective &S) { 3976 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3977 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 3978 }; 3979 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3980 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 3981 } 3982 3983 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 3984 const CapturedStmt *S, 3985 SourceLocation Loc) { 3986 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 3987 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 3988 CGF.CapturedStmtInfo = &CapStmtInfo; 3989 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 3990 Fn->setDoesNotRecurse(); 3991 return Fn; 3992 } 3993 3994 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 3995 if (S.hasClausesOfKind<OMPDependClause>()) { 3996 assert(!S.getAssociatedStmt() && 3997 "No associated statement must be in ordered depend construct."); 3998 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 3999 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 4000 return; 4001 } 4002 const auto *C = S.getSingleClause<OMPSIMDClause>(); 4003 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 4004 PrePostActionTy &Action) { 4005 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 4006 if (C) { 4007 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4008 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4009 llvm::Function *OutlinedFn = 4010 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 4011 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 4012 OutlinedFn, CapturedVars); 4013 } else { 4014 Action.Enter(CGF); 4015 CGF.EmitStmt(CS->getCapturedStmt()); 4016 } 4017 }; 4018 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4019 CGM.getOpenMPRuntime().emitOrderedRegion(*this, 
CodeGen, S.getBeginLoc(), !C); 4020 } 4021 4022 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 4023 QualType SrcType, QualType DestType, 4024 SourceLocation Loc) { 4025 assert(CGF.hasScalarEvaluationKind(DestType) && 4026 "DestType must have scalar evaluation kind."); 4027 assert(!Val.isAggregate() && "Must be a scalar or complex."); 4028 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 4029 DestType, Loc) 4030 : CGF.EmitComplexToScalarConversion( 4031 Val.getComplexVal(), SrcType, DestType, Loc); 4032 } 4033 4034 static CodeGenFunction::ComplexPairTy 4035 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 4036 QualType DestType, SourceLocation Loc) { 4037 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 4038 "DestType must have complex evaluation kind."); 4039 CodeGenFunction::ComplexPairTy ComplexVal; 4040 if (Val.isScalar()) { 4041 // Convert the input element to the element type of the complex. 4042 QualType DestElementType = 4043 DestType->castAs<ComplexType>()->getElementType(); 4044 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 4045 Val.getScalarVal(), SrcType, DestElementType, Loc); 4046 ComplexVal = CodeGenFunction::ComplexPairTy( 4047 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 4048 } else { 4049 assert(Val.isComplex() && "Must be a scalar or complex."); 4050 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 4051 QualType DestElementType = 4052 DestType->castAs<ComplexType>()->getElementType(); 4053 ComplexVal.first = CGF.EmitScalarConversion( 4054 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 4055 ComplexVal.second = CGF.EmitScalarConversion( 4056 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 4057 } 4058 return ComplexVal; 4059 } 4060 4061 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst, 4062 LValue LVal, RValue RVal) { 4063 if (LVal.isGlobalReg()) { 4064 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 4065 } else { 4066 CGF.EmitAtomicStore(RVal, LVal, 4067 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent 4068 : llvm::AtomicOrdering::Monotonic, 4069 LVal.isVolatile(), /*isInit=*/false); 4070 } 4071 } 4072 4073 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 4074 QualType RValTy, SourceLocation Loc) { 4075 switch (getEvaluationKind(LVal.getType())) { 4076 case TEK_Scalar: 4077 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 4078 *this, RVal, RValTy, LVal.getType(), Loc)), 4079 LVal); 4080 break; 4081 case TEK_Complex: 4082 EmitStoreOfComplex( 4083 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 4084 /*isInit=*/false); 4085 break; 4086 case TEK_Aggregate: 4087 llvm_unreachable("Must be a scalar or complex."); 4088 } 4089 } 4090 4091 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst, 4092 const Expr *X, const Expr *V, 4093 SourceLocation Loc) { 4094 // v = x; 4095 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 4096 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 4097 LValue XLValue = CGF.EmitLValue(X); 4098 LValue VLValue = CGF.EmitLValue(V); 4099 RValue Res = XLValue.isGlobalReg() 4100 ? CGF.EmitLoadOfLValue(XLValue, Loc) 4101 : CGF.EmitAtomicLoad( 4102 XLValue, Loc, 4103 IsSeqCst ? 
llvm::AtomicOrdering::SequentiallyConsistent 4104 : llvm::AtomicOrdering::Monotonic, 4105 XLValue.isVolatile()); 4106 // OpenMP, 2.12.6, atomic Construct 4107 // Any atomic construct with a seq_cst clause forces the atomically 4108 // performed operation to include an implicit flush operation without a 4109 // list. 4110 if (IsSeqCst) 4111 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 4112 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 4113 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4114 } 4115 4116 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst, 4117 const Expr *X, const Expr *E, 4118 SourceLocation Loc) { 4119 // x = expr; 4120 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 4121 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 4122 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4123 // OpenMP, 2.12.6, atomic Construct 4124 // Any atomic construct with a seq_cst clause forces the atomically 4125 // performed operation to include an implicit flush operation without a 4126 // list. 4127 if (IsSeqCst) 4128 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 4129 } 4130 4131 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 4132 RValue Update, 4133 BinaryOperatorKind BO, 4134 llvm::AtomicOrdering AO, 4135 bool IsXLHSInRHSPart) { 4136 ASTContext &Context = CGF.getContext(); 4137 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 4138 // expression is simple and atomic is allowed for the given type for the 4139 // target platform. 4140 if (BO == BO_Comma || !Update.isScalar() || 4141 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() || 4142 (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 4143 (Update.getScalarVal()->getType() != 4144 X.getAddress(CGF).getElementType())) || 4145 !X.getAddress(CGF).getElementType()->isIntegerTy() || 4146 !Context.getTargetInfo().hasBuiltinAtomic( 4147 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 4148 return std::make_pair(false, RValue::get(nullptr)); 4149 4150 llvm::AtomicRMWInst::BinOp RMWOp; 4151 switch (BO) { 4152 case BO_Add: 4153 RMWOp = llvm::AtomicRMWInst::Add; 4154 break; 4155 case BO_Sub: 4156 if (!IsXLHSInRHSPart) 4157 return std::make_pair(false, RValue::get(nullptr)); 4158 RMWOp = llvm::AtomicRMWInst::Sub; 4159 break; 4160 case BO_And: 4161 RMWOp = llvm::AtomicRMWInst::And; 4162 break; 4163 case BO_Or: 4164 RMWOp = llvm::AtomicRMWInst::Or; 4165 break; 4166 case BO_Xor: 4167 RMWOp = llvm::AtomicRMWInst::Xor; 4168 break; 4169 case BO_LT: 4170 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4171 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 4172 : llvm::AtomicRMWInst::Max) 4173 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 4174 : llvm::AtomicRMWInst::UMax); 4175 break; 4176 case BO_GT: 4177 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4178 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 4179 : llvm::AtomicRMWInst::Min) 4180 : (IsXLHSInRHSPart ? 
llvm::AtomicRMWInst::UMax 4181 : llvm::AtomicRMWInst::UMin); 4182 break; 4183 case BO_Assign: 4184 RMWOp = llvm::AtomicRMWInst::Xchg; 4185 break; 4186 case BO_Mul: 4187 case BO_Div: 4188 case BO_Rem: 4189 case BO_Shl: 4190 case BO_Shr: 4191 case BO_LAnd: 4192 case BO_LOr: 4193 return std::make_pair(false, RValue::get(nullptr)); 4194 case BO_PtrMemD: 4195 case BO_PtrMemI: 4196 case BO_LE: 4197 case BO_GE: 4198 case BO_EQ: 4199 case BO_NE: 4200 case BO_Cmp: 4201 case BO_AddAssign: 4202 case BO_SubAssign: 4203 case BO_AndAssign: 4204 case BO_OrAssign: 4205 case BO_XorAssign: 4206 case BO_MulAssign: 4207 case BO_DivAssign: 4208 case BO_RemAssign: 4209 case BO_ShlAssign: 4210 case BO_ShrAssign: 4211 case BO_Comma: 4212 llvm_unreachable("Unsupported atomic update operation"); 4213 } 4214 llvm::Value *UpdateVal = Update.getScalarVal(); 4215 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 4216 UpdateVal = CGF.Builder.CreateIntCast( 4217 IC, X.getAddress(CGF).getElementType(), 4218 X.getType()->hasSignedIntegerRepresentation()); 4219 } 4220 llvm::Value *Res = 4221 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 4222 return std::make_pair(true, RValue::get(Res)); 4223 } 4224 4225 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 4226 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 4227 llvm::AtomicOrdering AO, SourceLocation Loc, 4228 const llvm::function_ref<RValue(RValue)> CommonGen) { 4229 // Update expressions are allowed to have the following forms: 4230 // x binop= expr; -> xrval + expr; 4231 // x++, ++x -> xrval + 1; 4232 // x--, --x -> xrval - 1; 4233 // x = x binop expr; -> xrval binop expr 4234 // x = expr Op x; - > expr binop xrval; 4235 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 4236 if (!Res.first) { 4237 if (X.isGlobalReg()) { 4238 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 4239 // 'xrval'. 4240 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 4241 } else { 4242 // Perform compare-and-swap procedure. 4243 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 4244 } 4245 } 4246 return Res; 4247 } 4248 4249 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst, 4250 const Expr *X, const Expr *E, 4251 const Expr *UE, bool IsXLHSInRHSPart, 4252 SourceLocation Loc) { 4253 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4254 "Update expr in 'atomic update' must be a binary operator."); 4255 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4256 // Update expressions are allowed to have the following forms: 4257 // x binop= expr; -> xrval + expr; 4258 // x++, ++x -> xrval + 1; 4259 // x--, --x -> xrval - 1; 4260 // x = x binop expr; -> xrval binop expr 4261 // x = expr Op x; - > expr binop xrval; 4262 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 4263 LValue XLValue = CGF.EmitLValue(X); 4264 RValue ExprRValue = CGF.EmitAnyExpr(E); 4265 llvm::AtomicOrdering AO = IsSeqCst 4266 ? llvm::AtomicOrdering::SequentiallyConsistent 4267 : llvm::AtomicOrdering::Monotonic; 4268 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4269 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4270 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4271 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? 
RHS : LHS; 4272 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 4273 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4274 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4275 return CGF.EmitAnyExpr(UE); 4276 }; 4277 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 4278 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4279 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4280 // OpenMP, 2.12.6, atomic Construct 4281 // Any atomic construct with a seq_cst clause forces the atomically 4282 // performed operation to include an implicit flush operation without a 4283 // list. 4284 if (IsSeqCst) 4285 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 4286 } 4287 4288 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 4289 QualType SourceType, QualType ResType, 4290 SourceLocation Loc) { 4291 switch (CGF.getEvaluationKind(ResType)) { 4292 case TEK_Scalar: 4293 return RValue::get( 4294 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 4295 case TEK_Complex: { 4296 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 4297 return RValue::getComplex(Res.first, Res.second); 4298 } 4299 case TEK_Aggregate: 4300 break; 4301 } 4302 llvm_unreachable("Must be a scalar or complex."); 4303 } 4304 4305 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst, 4306 bool IsPostfixUpdate, const Expr *V, 4307 const Expr *X, const Expr *E, 4308 const Expr *UE, bool IsXLHSInRHSPart, 4309 SourceLocation Loc) { 4310 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 4311 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 4312 RValue NewVVal; 4313 LValue VLValue = CGF.EmitLValue(V); 4314 LValue XLValue = CGF.EmitLValue(X); 4315 RValue ExprRValue = CGF.EmitAnyExpr(E); 4316 llvm::AtomicOrdering AO = IsSeqCst 4317 ? llvm::AtomicOrdering::SequentiallyConsistent 4318 : llvm::AtomicOrdering::Monotonic; 4319 QualType NewVValType; 4320 if (UE) { 4321 // 'x' is updated with some additional value. 4322 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4323 "Update expr in 'atomic capture' must be a binary operator."); 4324 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4325 // Update expressions are allowed to have the following forms: 4326 // x binop= expr; -> xrval + expr; 4327 // x++, ++x -> xrval + 1; 4328 // x--, --x -> xrval - 1; 4329 // x = x binop expr; -> xrval binop expr 4330 // x = expr Op x; - > expr binop xrval; 4331 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4332 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4333 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4334 NewVValType = XRValExpr->getType(); 4335 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 4336 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 4337 IsPostfixUpdate](RValue XRValue) { 4338 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4339 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4340 RValue Res = CGF.EmitAnyExpr(UE); 4341 NewVVal = IsPostfixUpdate ? XRValue : Res; 4342 return Res; 4343 }; 4344 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 4345 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4346 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4347 if (Res.first) { 4348 // 'atomicrmw' instruction was generated. 
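      // Illustrative note (hypothetical source, not tied to any particular
      // caller): for
      //   #pragma omp atomic capture
      //   v = x++;
      // the postfix form stores the *old* value of 'x' into 'v', whereas
      //   v = ++x;
      // stores the updated value. 'atomicrmw' returns the old value, so for
      // the prefix form the new value is recomputed from it below.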
4349 if (IsPostfixUpdate) { 4350 // Use old value from 'atomicrmw'. 4351 NewVVal = Res.second; 4352 } else { 4353 // 'atomicrmw' does not provide new value, so evaluate it using old 4354 // value of 'x'. 4355 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4356 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 4357 NewVVal = CGF.EmitAnyExpr(UE); 4358 } 4359 } 4360 } else { 4361 // 'x' is simply rewritten with some 'expr'. 4362 NewVValType = X->getType().getNonReferenceType(); 4363 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 4364 X->getType().getNonReferenceType(), Loc); 4365 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 4366 NewVVal = XRValue; 4367 return ExprRValue; 4368 }; 4369 // Try to perform atomicrmw xchg, otherwise simple exchange. 4370 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 4371 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 4372 Loc, Gen); 4373 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4374 if (Res.first) { 4375 // 'atomicrmw' instruction was generated. 4376 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 4377 } 4378 } 4379 // Emit post-update store to 'v' of old/new 'x' value. 4380 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 4381 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4382 // OpenMP, 2.12.6, atomic Construct 4383 // Any atomic construct with a seq_cst clause forces the atomically 4384 // performed operation to include an implicit flush operation without a 4385 // list. 4386 if (IsSeqCst) 4387 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 4388 } 4389 4390 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 4391 bool IsSeqCst, bool IsPostfixUpdate, 4392 const Expr *X, const Expr *V, const Expr *E, 4393 const Expr *UE, bool IsXLHSInRHSPart, 4394 SourceLocation Loc) { 4395 switch (Kind) { 4396 case OMPC_read: 4397 emitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc); 4398 break; 4399 case OMPC_write: 4400 emitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc); 4401 break; 4402 case OMPC_unknown: 4403 case OMPC_update: 4404 emitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc); 4405 break; 4406 case OMPC_capture: 4407 emitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE, 4408 IsXLHSInRHSPart, Loc); 4409 break; 4410 case OMPC_if: 4411 case OMPC_final: 4412 case OMPC_num_threads: 4413 case OMPC_private: 4414 case OMPC_firstprivate: 4415 case OMPC_lastprivate: 4416 case OMPC_reduction: 4417 case OMPC_task_reduction: 4418 case OMPC_in_reduction: 4419 case OMPC_safelen: 4420 case OMPC_simdlen: 4421 case OMPC_allocator: 4422 case OMPC_allocate: 4423 case OMPC_collapse: 4424 case OMPC_default: 4425 case OMPC_seq_cst: 4426 case OMPC_shared: 4427 case OMPC_linear: 4428 case OMPC_aligned: 4429 case OMPC_copyin: 4430 case OMPC_copyprivate: 4431 case OMPC_flush: 4432 case OMPC_proc_bind: 4433 case OMPC_schedule: 4434 case OMPC_ordered: 4435 case OMPC_nowait: 4436 case OMPC_untied: 4437 case OMPC_threadprivate: 4438 case OMPC_depend: 4439 case OMPC_mergeable: 4440 case OMPC_device: 4441 case OMPC_threads: 4442 case OMPC_simd: 4443 case OMPC_map: 4444 case OMPC_num_teams: 4445 case OMPC_thread_limit: 4446 case OMPC_priority: 4447 case OMPC_grainsize: 4448 case OMPC_nogroup: 4449 case OMPC_num_tasks: 4450 case OMPC_hint: 4451 case OMPC_dist_schedule: 4452 case OMPC_defaultmap: 4453 case OMPC_uniform: 4454 case OMPC_to: 4455 case OMPC_from: 4456 case 
OMPC_use_device_ptr: 4457 case OMPC_is_device_ptr: 4458 case OMPC_unified_address: 4459 case OMPC_unified_shared_memory: 4460 case OMPC_reverse_offload: 4461 case OMPC_dynamic_allocators: 4462 case OMPC_atomic_default_mem_order: 4463 case OMPC_device_type: 4464 case OMPC_match: 4465 case OMPC_nontemporal: 4466 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 4467 } 4468 } 4469 4470 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 4471 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>(); 4472 OpenMPClauseKind Kind = OMPC_unknown; 4473 for (const OMPClause *C : S.clauses()) { 4474 // Find first clause (skip seq_cst clause, if it is first). 4475 if (C->getClauseKind() != OMPC_seq_cst) { 4476 Kind = C->getClauseKind(); 4477 break; 4478 } 4479 } 4480 4481 const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers(); 4482 if (const auto *FE = dyn_cast<FullExpr>(CS)) 4483 enterFullExpression(FE); 4484 // Processing for statements under 'atomic capture'. 4485 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) { 4486 for (const Stmt *C : Compound->body()) { 4487 if (const auto *FE = dyn_cast<FullExpr>(C)) 4488 enterFullExpression(FE); 4489 } 4490 } 4491 4492 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF, 4493 PrePostActionTy &) { 4494 CGF.EmitStopPoint(CS); 4495 emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(), 4496 S.getV(), S.getExpr(), S.getUpdateExpr(), 4497 S.isXLHSInRHSPart(), S.getBeginLoc()); 4498 }; 4499 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4500 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 4501 } 4502 4503 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 4504 const OMPExecutableDirective &S, 4505 const RegionCodeGenTy &CodeGen) { 4506 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 4507 CodeGenModule &CGM = CGF.CGM; 4508 4509 // On device emit this construct as inlined code. 4510 if (CGM.getLangOpts().OpenMPIsDevice) { 4511 OMPLexicalScope Scope(CGF, S, OMPD_target); 4512 CGM.getOpenMPRuntime().emitInlinedDirective( 4513 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4514 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4515 }); 4516 return; 4517 } 4518 4519 auto LPCRegion = 4520 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S); 4521 llvm::Function *Fn = nullptr; 4522 llvm::Constant *FnID = nullptr; 4523 4524 const Expr *IfCond = nullptr; 4525 // Check for the at most one if clause associated with the target region. 4526 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4527 if (C->getNameModifier() == OMPD_unknown || 4528 C->getNameModifier() == OMPD_target) { 4529 IfCond = C->getCondition(); 4530 break; 4531 } 4532 } 4533 4534 // Check if we have any device clause associated with the directive. 4535 const Expr *Device = nullptr; 4536 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 4537 Device = C->getDevice(); 4538 4539 // Check if we have an if clause whose conditional always evaluates to false 4540 // or if we do not have any targets specified. If so the target region is not 4541 // an offload entry point. 
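  // For example (illustrative only), a region such as
  //   #pragma omp target if(0)
  //   { ... }
  // is known at compile time to always execute on the host, so no offload
  // entry needs to be registered for it.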
4542 bool IsOffloadEntry = true; 4543 if (IfCond) { 4544 bool Val; 4545 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 4546 IsOffloadEntry = false; 4547 } 4548 if (CGM.getLangOpts().OMPTargetTriples.empty()) 4549 IsOffloadEntry = false; 4550 4551 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 4552 StringRef ParentName; 4553 // In case we have Ctors/Dtors we use the complete type variant to produce 4554 // the mangling of the device outlined kernel. 4555 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 4556 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 4557 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 4558 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 4559 else 4560 ParentName = 4561 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 4562 4563 // Emit target region as a standalone region. 4564 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 4565 IsOffloadEntry, CodeGen); 4566 OMPLexicalScope Scope(CGF, S, OMPD_task); 4567 auto &&SizeEmitter = 4568 [IsOffloadEntry](CodeGenFunction &CGF, 4569 const OMPLoopDirective &D) -> llvm::Value * { 4570 if (IsOffloadEntry) { 4571 OMPLoopScope(CGF, D); 4572 // Emit calculation of the iterations count. 4573 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 4574 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 4575 /*isSigned=*/false); 4576 return NumIterations; 4577 } 4578 return nullptr; 4579 }; 4580 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 4581 SizeEmitter); 4582 } 4583 4584 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 4585 PrePostActionTy &Action) { 4586 Action.Enter(CGF); 4587 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4588 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4589 CGF.EmitOMPPrivateClause(S, PrivateScope); 4590 (void)PrivateScope.Privatize(); 4591 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4592 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 4593 4594 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 4595 } 4596 4597 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 4598 StringRef ParentName, 4599 const OMPTargetDirective &S) { 4600 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4601 emitTargetRegion(CGF, S, Action); 4602 }; 4603 llvm::Function *Fn; 4604 llvm::Constant *Addr; 4605 // Emit target region as a standalone region. 
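  // As a rough illustration, the outlined kernel for a region nested in a
  // function 'foo' typically gets a name of the form
  //   __omp_offloading_<device-id>_<file-id>_foo_l<line>
  // so host and device agree on the offload entry; the exact mangling is
  // owned by CGOpenMPRuntime and may differ between runtime versions.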
4606 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4607 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4608 assert(Fn && Addr && "Target device function emission failed."); 4609 } 4610 4611 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 4612 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4613 emitTargetRegion(CGF, S, Action); 4614 }; 4615 emitCommonOMPTargetDirective(*this, S, CodeGen); 4616 } 4617 4618 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 4619 const OMPExecutableDirective &S, 4620 OpenMPDirectiveKind InnermostKind, 4621 const RegionCodeGenTy &CodeGen) { 4622 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 4623 llvm::Function *OutlinedFn = 4624 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 4625 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 4626 4627 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 4628 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 4629 if (NT || TL) { 4630 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 4631 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 4632 4633 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 4634 S.getBeginLoc()); 4635 } 4636 4637 OMPTeamsScope Scope(CGF, S); 4638 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4639 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4640 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 4641 CapturedVars); 4642 } 4643 4644 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 4645 // Emit teams region as a standalone region. 4646 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4647 Action.Enter(CGF); 4648 OMPPrivateScope PrivateScope(CGF); 4649 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4650 CGF.EmitOMPPrivateClause(S, PrivateScope); 4651 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4652 (void)PrivateScope.Privatize(); 4653 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 4654 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4655 }; 4656 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 4657 emitPostUpdateForReductionClause(*this, S, 4658 [](CodeGenFunction &) { return nullptr; }); 4659 } 4660 4661 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 4662 const OMPTargetTeamsDirective &S) { 4663 auto *CS = S.getCapturedStmt(OMPD_teams); 4664 Action.Enter(CGF); 4665 // Emit teams region as a standalone region. 
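  // Hypothetical source form handled here:
  //   #pragma omp target teams firstprivate(a) reduction(+:sum)
  //   { ... }
  // Private/firstprivate copies and reduction variables are set up before the
  // captured statement is emitted, and the reduction is finalized with the
  // 'teams' reduction kind.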
4666 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 4667 Action.Enter(CGF); 4668 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4669 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4670 CGF.EmitOMPPrivateClause(S, PrivateScope); 4671 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4672 (void)PrivateScope.Privatize(); 4673 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4674 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 4675 CGF.EmitStmt(CS->getCapturedStmt()); 4676 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4677 }; 4678 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 4679 emitPostUpdateForReductionClause(CGF, S, 4680 [](CodeGenFunction &) { return nullptr; }); 4681 } 4682 4683 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 4684 CodeGenModule &CGM, StringRef ParentName, 4685 const OMPTargetTeamsDirective &S) { 4686 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4687 emitTargetTeamsRegion(CGF, Action, S); 4688 }; 4689 llvm::Function *Fn; 4690 llvm::Constant *Addr; 4691 // Emit target region as a standalone region. 4692 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4693 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4694 assert(Fn && Addr && "Target device function emission failed."); 4695 } 4696 4697 void CodeGenFunction::EmitOMPTargetTeamsDirective( 4698 const OMPTargetTeamsDirective &S) { 4699 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4700 emitTargetTeamsRegion(CGF, Action, S); 4701 }; 4702 emitCommonOMPTargetDirective(*this, S, CodeGen); 4703 } 4704 4705 static void 4706 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 4707 const OMPTargetTeamsDistributeDirective &S) { 4708 Action.Enter(CGF); 4709 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4710 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4711 }; 4712 4713 // Emit teams region as a standalone region. 4714 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4715 PrePostActionTy &Action) { 4716 Action.Enter(CGF); 4717 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4718 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4719 (void)PrivateScope.Privatize(); 4720 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4721 CodeGenDistribute); 4722 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4723 }; 4724 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 4725 emitPostUpdateForReductionClause(CGF, S, 4726 [](CodeGenFunction &) { return nullptr; }); 4727 } 4728 4729 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 4730 CodeGenModule &CGM, StringRef ParentName, 4731 const OMPTargetTeamsDistributeDirective &S) { 4732 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4733 emitTargetTeamsDistributeRegion(CGF, Action, S); 4734 }; 4735 llvm::Function *Fn; 4736 llvm::Constant *Addr; 4737 // Emit target region as a standalone region. 
4738 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4739 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4740 assert(Fn && Addr && "Target device function emission failed."); 4741 } 4742 4743 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 4744 const OMPTargetTeamsDistributeDirective &S) { 4745 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4746 emitTargetTeamsDistributeRegion(CGF, Action, S); 4747 }; 4748 emitCommonOMPTargetDirective(*this, S, CodeGen); 4749 } 4750 4751 static void emitTargetTeamsDistributeSimdRegion( 4752 CodeGenFunction &CGF, PrePostActionTy &Action, 4753 const OMPTargetTeamsDistributeSimdDirective &S) { 4754 Action.Enter(CGF); 4755 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4756 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4757 }; 4758 4759 // Emit teams region as a standalone region. 4760 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4761 PrePostActionTy &Action) { 4762 Action.Enter(CGF); 4763 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4764 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4765 (void)PrivateScope.Privatize(); 4766 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4767 CodeGenDistribute); 4768 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4769 }; 4770 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 4771 emitPostUpdateForReductionClause(CGF, S, 4772 [](CodeGenFunction &) { return nullptr; }); 4773 } 4774 4775 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 4776 CodeGenModule &CGM, StringRef ParentName, 4777 const OMPTargetTeamsDistributeSimdDirective &S) { 4778 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4779 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 4780 }; 4781 llvm::Function *Fn; 4782 llvm::Constant *Addr; 4783 // Emit target region as a standalone region. 4784 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4785 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4786 assert(Fn && Addr && "Target device function emission failed."); 4787 } 4788 4789 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 4790 const OMPTargetTeamsDistributeSimdDirective &S) { 4791 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4792 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 4793 }; 4794 emitCommonOMPTargetDirective(*this, S, CodeGen); 4795 } 4796 4797 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 4798 const OMPTeamsDistributeDirective &S) { 4799 4800 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4801 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4802 }; 4803 4804 // Emit teams region as a standalone region. 
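  // Hypothetical source form handled here:
  //   #pragma omp teams distribute reduction(+:sum)
  //   for (int i = 0; i < n; ++i) sum += a[i];
  // The distribute loop is emitted as an inlined 'distribute' region inside
  // the outlined teams function.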
4805 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4806 PrePostActionTy &Action) { 4807 Action.Enter(CGF); 4808 OMPPrivateScope PrivateScope(CGF); 4809 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4810 (void)PrivateScope.Privatize(); 4811 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4812 CodeGenDistribute); 4813 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4814 }; 4815 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 4816 emitPostUpdateForReductionClause(*this, S, 4817 [](CodeGenFunction &) { return nullptr; }); 4818 } 4819 4820 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 4821 const OMPTeamsDistributeSimdDirective &S) { 4822 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4823 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4824 }; 4825 4826 // Emit teams region as a standalone region. 4827 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4828 PrePostActionTy &Action) { 4829 Action.Enter(CGF); 4830 OMPPrivateScope PrivateScope(CGF); 4831 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4832 (void)PrivateScope.Privatize(); 4833 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 4834 CodeGenDistribute); 4835 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4836 }; 4837 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 4838 emitPostUpdateForReductionClause(*this, S, 4839 [](CodeGenFunction &) { return nullptr; }); 4840 } 4841 4842 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 4843 const OMPTeamsDistributeParallelForDirective &S) { 4844 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4845 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 4846 S.getDistInc()); 4847 }; 4848 4849 // Emit teams region as a standalone region. 4850 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4851 PrePostActionTy &Action) { 4852 Action.Enter(CGF); 4853 OMPPrivateScope PrivateScope(CGF); 4854 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4855 (void)PrivateScope.Privatize(); 4856 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4857 CodeGenDistribute); 4858 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4859 }; 4860 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 4861 emitPostUpdateForReductionClause(*this, S, 4862 [](CodeGenFunction &) { return nullptr; }); 4863 } 4864 4865 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 4866 const OMPTeamsDistributeParallelForSimdDirective &S) { 4867 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4868 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 4869 S.getDistInc()); 4870 }; 4871 4872 // Emit teams region as a standalone region. 
4873 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4874 PrePostActionTy &Action) { 4875 Action.Enter(CGF); 4876 OMPPrivateScope PrivateScope(CGF); 4877 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4878 (void)PrivateScope.Privatize(); 4879 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 4880 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 4881 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4882 }; 4883 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 4884 CodeGen); 4885 emitPostUpdateForReductionClause(*this, S, 4886 [](CodeGenFunction &) { return nullptr; }); 4887 } 4888 4889 static void emitTargetTeamsDistributeParallelForRegion( 4890 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 4891 PrePostActionTy &Action) { 4892 Action.Enter(CGF); 4893 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4894 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 4895 S.getDistInc()); 4896 }; 4897 4898 // Emit teams region as a standalone region. 4899 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4900 PrePostActionTy &Action) { 4901 Action.Enter(CGF); 4902 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4903 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4904 (void)PrivateScope.Privatize(); 4905 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 4906 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 4907 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4908 }; 4909 4910 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 4911 CodeGenTeams); 4912 emitPostUpdateForReductionClause(CGF, S, 4913 [](CodeGenFunction &) { return nullptr; }); 4914 } 4915 4916 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 4917 CodeGenModule &CGM, StringRef ParentName, 4918 const OMPTargetTeamsDistributeParallelForDirective &S) { 4919 // Emit SPMD target teams distribute parallel for region as a standalone 4920 // region. 4921 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4922 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 4923 }; 4924 llvm::Function *Fn; 4925 llvm::Constant *Addr; 4926 // Emit target region as a standalone region. 4927 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4928 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4929 assert(Fn && Addr && "Target device function emission failed."); 4930 } 4931 4932 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 4933 const OMPTargetTeamsDistributeParallelForDirective &S) { 4934 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4935 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 4936 }; 4937 emitCommonOMPTargetDirective(*this, S, CodeGen); 4938 } 4939 4940 static void emitTargetTeamsDistributeParallelForSimdRegion( 4941 CodeGenFunction &CGF, 4942 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 4943 PrePostActionTy &Action) { 4944 Action.Enter(CGF); 4945 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4946 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 4947 S.getDistInc()); 4948 }; 4949 4950 // Emit teams region as a standalone region. 
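  // Hypothetical source form handled here:
  //   #pragma omp target teams distribute parallel for simd
  //   for (int i = 0; i < n; ++i) a[i] = b[i] + c[i];
  // The teams region below wraps an inlined 'distribute' region; the
  // 'parallel for' and 'simd' parts are handled when the distribute loop body
  // is emitted via emitInnerParallelForWhenCombined.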
4951 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4952 PrePostActionTy &Action) { 4953 Action.Enter(CGF); 4954 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4955 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4956 (void)PrivateScope.Privatize(); 4957 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 4958 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 4959 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4960 }; 4961 4962 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 4963 CodeGenTeams); 4964 emitPostUpdateForReductionClause(CGF, S, 4965 [](CodeGenFunction &) { return nullptr; }); 4966 } 4967 4968 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 4969 CodeGenModule &CGM, StringRef ParentName, 4970 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 4971 // Emit SPMD target teams distribute parallel for simd region as a standalone 4972 // region. 4973 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4974 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 4975 }; 4976 llvm::Function *Fn; 4977 llvm::Constant *Addr; 4978 // Emit target region as a standalone region. 4979 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4980 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4981 assert(Fn && Addr && "Target device function emission failed."); 4982 } 4983 4984 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 4985 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 4986 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4987 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 4988 }; 4989 emitCommonOMPTargetDirective(*this, S, CodeGen); 4990 } 4991 4992 void CodeGenFunction::EmitOMPCancellationPointDirective( 4993 const OMPCancellationPointDirective &S) { 4994 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 4995 S.getCancelRegion()); 4996 } 4997 4998 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 4999 const Expr *IfCond = nullptr; 5000 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5001 if (C->getNameModifier() == OMPD_unknown || 5002 C->getNameModifier() == OMPD_cancel) { 5003 IfCond = C->getCondition(); 5004 break; 5005 } 5006 } 5007 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5008 // TODO: This check is necessary as we only generate `omp parallel` through 5009 // the OpenMPIRBuilder for now. 
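    // Illustrative source (hypothetical):
    //   #pragma omp parallel
    //   {
    //     if (err) {
    //       #pragma omp cancel parallel if(cond)
    //     }
    //   }
    // When the OpenMPIRBuilder path is taken, the cancel is lowered through
    // OMPIRBuilder::CreateCancel; otherwise the existing runtime call below
    // is used.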
    if (S.getCancelRegion() == OMPD_parallel) {
      llvm::Value *IfCondition = nullptr;
      if (IfCond)
        IfCondition = EmitScalarExpr(IfCond,
                                     /*IgnoreResultAssign=*/true);
      return Builder.restoreIP(
          OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
    }
  }

  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task ||
      Kind == OMPD_target_parallel)
    return ReturnBlock;
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
         Kind == OMPD_distribute_parallel_for ||
         Kind == OMPD_target_parallel_for ||
         Kind == OMPD_teams_distribute_parallel_for ||
         Kind == OMPD_target_teams_distribute_parallel_for);
  return OMPCancelStack.getExitBlock();
}

void CodeGenFunction::EmitOMPUseDevicePtrClause(
    const OMPClause &NC, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  const auto &C = cast<OMPUseDevicePtrClause>(NC);
  auto OrigVarIt = C.varlist_begin();
  auto InitIt = C.inits().begin();
  for (const Expr *PvtVarIt : C.private_copies()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
    const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
    const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());

    // In order to identify the right initializer we need to match the
    // declaration used by the mapping logic. In some cases we may get
    // an OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
                                                         InitAddrIt, InitVD,
                                                         PvtVD]() {
      // Initialize the temporary initialization variable with the address we
      // get from the runtime library. We have to cast the source address
      // because it is always a void *. References are materialized in the
      // privatization scope, so the initialization here disregards the fact
      // that the original variable is a reference.
      QualType AddrQTy =
          getContext().getPointerType(OrigVD->getType().getNonReferenceType());
      llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
      Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
      setAddrOfLocalVar(InitVD, InitAddr);

      // Emit the private declaration; it will be initialized from the
      // declaration we just added to the local declarations map.
      EmitDecl(*PvtVD);

      // The initialization variable has served its purpose in the emission
      // of the previous declaration, so we don't need it anymore.
      LocalDeclMap.erase(InitVD);

      // Return the address of the private variable.
      return GetAddrOfLocalVar(PvtVD);
    });
    assert(IsRegistered && "firstprivate var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    ++OrigVarIt;
    ++InitIt;
  }
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);

  // Create a pre/post action to signal the privatization of the device
  // pointers. This action can be replaced by the OpenMP runtime code
  // generation to deactivate privatization.
  bool PrivatizeDevicePointers = false;
  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);

  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
      CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    };

    // Codegen that selects whether to generate the privatization code or not.
    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
                          &InnermostCodeGen](CodeGenFunction &CGF,
                                             PrePostActionTy &Action) {
      RegionCodeGenTy RCG(InnermostCodeGen);
      PrivatizeDevicePointers = false;

      // Call the pre-action to change the status of PrivatizeDevicePointers
      // if needed.
      Action.Enter(CGF);

      if (PrivatizeDevicePointers) {
        OMPPrivateScope PrivateScope(CGF);
        // Emit all instances of the use_device_ptr clause.
        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
                                        Info.CaptureDeviceAddrMap);
        (void)PrivateScope.Privatize();
        RCG(CGF);
      } else {
        RCG(CGF);
      }
    };

    // Forward the provided action to the privatization codegen.
    RegionCodeGenTy PrivRCG(PrivCodeGen);
    PrivRCG.setAction(Action);

    // Although the body of the region is emitted as an inlined directive, we
    // don't use an inlined scope, because changes to references inside the
    // region are expected to be visible outside, so we do not privatize them.
    OMPLexicalScope Scope(CGF, S);
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
                                                    PrivRCG);
  };

  RegionCodeGenTy RCG(CodeGen);

  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
    RCG(*this);
    return;
  }

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
5180 const Expr *Device = nullptr; 5181 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5182 Device = C->getDevice(); 5183 5184 // Set the action to signal privatization of device pointers. 5185 RCG.setAction(PrivAction); 5186 5187 // Emit region code. 5188 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 5189 Info); 5190 } 5191 5192 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 5193 const OMPTargetEnterDataDirective &S) { 5194 // If we don't have target devices, don't bother emitting the data mapping 5195 // code. 5196 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5197 return; 5198 5199 // Check if we have any if clause associated with the directive. 5200 const Expr *IfCond = nullptr; 5201 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5202 IfCond = C->getCondition(); 5203 5204 // Check if we have any device clause associated with the directive. 5205 const Expr *Device = nullptr; 5206 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5207 Device = C->getDevice(); 5208 5209 OMPLexicalScope Scope(*this, S, OMPD_task); 5210 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5211 } 5212 5213 void CodeGenFunction::EmitOMPTargetExitDataDirective( 5214 const OMPTargetExitDataDirective &S) { 5215 // If we don't have target devices, don't bother emitting the data mapping 5216 // code. 5217 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5218 return; 5219 5220 // Check if we have any if clause associated with the directive. 5221 const Expr *IfCond = nullptr; 5222 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5223 IfCond = C->getCondition(); 5224 5225 // Check if we have any device clause associated with the directive. 5226 const Expr *Device = nullptr; 5227 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5228 Device = C->getDevice(); 5229 5230 OMPLexicalScope Scope(*this, S, OMPD_task); 5231 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5232 } 5233 5234 static void emitTargetParallelRegion(CodeGenFunction &CGF, 5235 const OMPTargetParallelDirective &S, 5236 PrePostActionTy &Action) { 5237 // Get the captured statement associated with the 'parallel' region. 5238 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 5239 Action.Enter(CGF); 5240 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5241 Action.Enter(CGF); 5242 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5243 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5244 CGF.EmitOMPPrivateClause(S, PrivateScope); 5245 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5246 (void)PrivateScope.Privatize(); 5247 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5248 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5249 // TODO: Add support for clauses. 
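    // At this point private, firstprivate, and reduction variables have been
    // privatized, so the captured body of a directive such as (illustrative)
    //   #pragma omp target parallel reduction(+:sum)
    //   { ... }
    // can be emitted directly below.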
5250 CGF.EmitStmt(CS->getCapturedStmt()); 5251 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 5252 }; 5253 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 5254 emitEmptyBoundParameters); 5255 emitPostUpdateForReductionClause(CGF, S, 5256 [](CodeGenFunction &) { return nullptr; }); 5257 } 5258 5259 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 5260 CodeGenModule &CGM, StringRef ParentName, 5261 const OMPTargetParallelDirective &S) { 5262 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5263 emitTargetParallelRegion(CGF, S, Action); 5264 }; 5265 llvm::Function *Fn; 5266 llvm::Constant *Addr; 5267 // Emit target region as a standalone region. 5268 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5269 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5270 assert(Fn && Addr && "Target device function emission failed."); 5271 } 5272 5273 void CodeGenFunction::EmitOMPTargetParallelDirective( 5274 const OMPTargetParallelDirective &S) { 5275 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5276 emitTargetParallelRegion(CGF, S, Action); 5277 }; 5278 emitCommonOMPTargetDirective(*this, S, CodeGen); 5279 } 5280 5281 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 5282 const OMPTargetParallelForDirective &S, 5283 PrePostActionTy &Action) { 5284 Action.Enter(CGF); 5285 // Emit directive as a combined directive that consists of two implicit 5286 // directives: 'parallel' with 'for' directive. 5287 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5288 Action.Enter(CGF); 5289 CodeGenFunction::OMPCancelStackRAII CancelRegion( 5290 CGF, OMPD_target_parallel_for, S.hasCancel()); 5291 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 5292 emitDispatchForLoopBounds); 5293 }; 5294 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 5295 emitEmptyBoundParameters); 5296 } 5297 5298 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 5299 CodeGenModule &CGM, StringRef ParentName, 5300 const OMPTargetParallelForDirective &S) { 5301 // Emit SPMD target parallel for region as a standalone region. 5302 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5303 emitTargetParallelForRegion(CGF, S, Action); 5304 }; 5305 llvm::Function *Fn; 5306 llvm::Constant *Addr; 5307 // Emit target region as a standalone region. 5308 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5309 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5310 assert(Fn && Addr && "Target device function emission failed."); 5311 } 5312 5313 void CodeGenFunction::EmitOMPTargetParallelForDirective( 5314 const OMPTargetParallelForDirective &S) { 5315 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5316 emitTargetParallelForRegion(CGF, S, Action); 5317 }; 5318 emitCommonOMPTargetDirective(*this, S, CodeGen); 5319 } 5320 5321 static void 5322 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 5323 const OMPTargetParallelForSimdDirective &S, 5324 PrePostActionTy &Action) { 5325 Action.Enter(CGF); 5326 // Emit directive as a combined directive that consists of two implicit 5327 // directives: 'parallel' with 'for' directive. 
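  // Hypothetical example of the combined form handled here:
  //   #pragma omp target parallel for simd
  //   for (int i = 0; i < n; ++i) a[i] *= 2;
  // The worksharing loop is emitted with the static loop-bound helpers, and
  // the simd part is applied by the same loop emission when the directive
  // kind is a simd directive.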
5328 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5329 Action.Enter(CGF); 5330 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 5331 emitDispatchForLoopBounds); 5332 }; 5333 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen, 5334 emitEmptyBoundParameters); 5335 } 5336 5337 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 5338 CodeGenModule &CGM, StringRef ParentName, 5339 const OMPTargetParallelForSimdDirective &S) { 5340 // Emit SPMD target parallel for region as a standalone region. 5341 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5342 emitTargetParallelForSimdRegion(CGF, S, Action); 5343 }; 5344 llvm::Function *Fn; 5345 llvm::Constant *Addr; 5346 // Emit target region as a standalone region. 5347 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5348 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5349 assert(Fn && Addr && "Target device function emission failed."); 5350 } 5351 5352 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective( 5353 const OMPTargetParallelForSimdDirective &S) { 5354 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5355 emitTargetParallelForSimdRegion(CGF, S, Action); 5356 }; 5357 emitCommonOMPTargetDirective(*this, S, CodeGen); 5358 } 5359 5360 /// Emit a helper variable and return corresponding lvalue. 5361 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, 5362 const ImplicitParamDecl *PVD, 5363 CodeGenFunction::OMPPrivateScope &Privates) { 5364 const auto *VDecl = cast<VarDecl>(Helper->getDecl()); 5365 Privates.addPrivate(VDecl, 5366 [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); }); 5367 } 5368 5369 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) { 5370 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind())); 5371 // Emit outlined function for task construct. 5372 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop); 5373 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 5374 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 5375 const Expr *IfCond = nullptr; 5376 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5377 if (C->getNameModifier() == OMPD_unknown || 5378 C->getNameModifier() == OMPD_taskloop) { 5379 IfCond = C->getCondition(); 5380 break; 5381 } 5382 } 5383 5384 OMPTaskDataTy Data; 5385 // Check if taskloop must be emitted without taskgroup. 5386 Data.Nogroup = S.getSingleClause<OMPNogroupClause>(); 5387 // TODO: Check if we should emit tied or untied task. 5388 Data.Tied = true; 5389 // Set scheduling for taskloop 5390 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) { 5391 // grainsize clause 5392 Data.Schedule.setInt(/*IntVal=*/false); 5393 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 5394 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) { 5395 // num_tasks clause 5396 Data.Schedule.setInt(/*IntVal=*/true); 5397 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 5398 } 5399 5400 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 5401 // if (PreCond) { 5402 // for (IV in 0..LastIteration) BODY; 5403 // <Final counter/linear vars updates>; 5404 // } 5405 // 5406 5407 // Emit: if (PreCond) - begin. 5408 // If the condition constant folds and can be elided, avoid emitting the 5409 // whole loop. 
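    // For example (illustrative), a precondition that folds to 'false', as
    // produced for
    //   #pragma omp taskloop
    //   for (int i = 0; i < 0; ++i) ...
    // lets us skip emitting the loop entirely.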
5410 bool CondConstant; 5411 llvm::BasicBlock *ContBlock = nullptr; 5412 OMPLoopScope PreInitScope(CGF, S); 5413 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 5414 if (!CondConstant) 5415 return; 5416 } else { 5417 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 5418 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 5419 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 5420 CGF.getProfileCount(&S)); 5421 CGF.EmitBlock(ThenBlock); 5422 CGF.incrementProfileCounter(&S); 5423 } 5424 5425 (void)CGF.EmitOMPLinearClauseInit(S); 5426 5427 OMPPrivateScope LoopScope(CGF); 5428 // Emit helper vars inits. 5429 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 5430 auto *I = CS->getCapturedDecl()->param_begin(); 5431 auto *LBP = std::next(I, LowerBound); 5432 auto *UBP = std::next(I, UpperBound); 5433 auto *STP = std::next(I, Stride); 5434 auto *LIP = std::next(I, LastIter); 5435 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 5436 LoopScope); 5437 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 5438 LoopScope); 5439 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 5440 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 5441 LoopScope); 5442 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 5443 CGF.EmitOMPLinearClause(S, LoopScope); 5444 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 5445 (void)LoopScope.Privatize(); 5446 // Emit the loop iteration variable. 5447 const Expr *IVExpr = S.getIterationVariable(); 5448 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 5449 CGF.EmitVarDecl(*IVDecl); 5450 CGF.EmitIgnoredExpr(S.getInit()); 5451 5452 // Emit the iterations count variable. 5453 // If it is not a variable, Sema decided to calculate iterations count on 5454 // each iteration (e.g., it is foldable into a constant). 5455 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 5456 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 5457 // Emit calculation of the iterations count. 5458 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 5459 } 5460 5461 { 5462 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 5463 emitCommonSimdLoop( 5464 CGF, S, 5465 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5466 if (isOpenMPSimdDirective(S.getDirectiveKind())) 5467 CGF.EmitOMPSimdInit(S); 5468 }, 5469 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 5470 CGF.EmitOMPInnerLoop( 5471 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 5472 [&S](CodeGenFunction &CGF) { 5473 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest()); 5474 CGF.EmitStopPoint(&S); 5475 }, 5476 [](CodeGenFunction &) {}); 5477 }); 5478 } 5479 // Emit: if (PreCond) - end. 5480 if (ContBlock) { 5481 CGF.EmitBranch(ContBlock); 5482 CGF.EmitBlock(ContBlock, true); 5483 } 5484 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
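    // Illustrative source (hypothetical):
    //   #pragma omp taskloop lastprivate(last)
    //   for (int i = 0; i < n; ++i) last = a[i];
    // The runtime reports through the 'last iteration' parameter whether this
    // task body executed the last iteration; only then is the private copy
    // copied back to the original variable.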
5485 if (HasLastprivateClause) { 5486 CGF.EmitOMPLastprivateClauseFinal( 5487 S, isOpenMPSimdDirective(S.getDirectiveKind()), 5488 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 5489 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 5490 (*LIP)->getType(), S.getBeginLoc()))); 5491 } 5492 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 5493 return CGF.Builder.CreateIsNotNull( 5494 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 5495 (*LIP)->getType(), S.getBeginLoc())); 5496 }); 5497 }; 5498 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 5499 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 5500 const OMPTaskDataTy &Data) { 5501 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 5502 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 5503 OMPLoopScope PreInitScope(CGF, S); 5504 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 5505 OutlinedFn, SharedsTy, 5506 CapturedStruct, IfCond, Data); 5507 }; 5508 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 5509 CodeGen); 5510 }; 5511 if (Data.Nogroup) { 5512 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 5513 } else { 5514 CGM.getOpenMPRuntime().emitTaskgroupRegion( 5515 *this, 5516 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 5517 PrePostActionTy &Action) { 5518 Action.Enter(CGF); 5519 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 5520 Data); 5521 }, 5522 S.getBeginLoc()); 5523 } 5524 } 5525 5526 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 5527 auto LPCRegion = 5528 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5529 EmitOMPTaskLoopBasedDirective(S); 5530 } 5531 5532 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 5533 const OMPTaskLoopSimdDirective &S) { 5534 auto LPCRegion = 5535 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5536 OMPLexicalScope Scope(*this, S); 5537 EmitOMPTaskLoopBasedDirective(S); 5538 } 5539 5540 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 5541 const OMPMasterTaskLoopDirective &S) { 5542 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5543 Action.Enter(CGF); 5544 EmitOMPTaskLoopBasedDirective(S); 5545 }; 5546 auto LPCRegion = 5547 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5548 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 5549 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 5550 } 5551 5552 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 5553 const OMPMasterTaskLoopSimdDirective &S) { 5554 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5555 Action.Enter(CGF); 5556 EmitOMPTaskLoopBasedDirective(S); 5557 }; 5558 auto LPCRegion = 5559 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5560 OMPLexicalScope Scope(*this, S); 5561 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 5562 } 5563 5564 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 5565 const OMPParallelMasterTaskLoopDirective &S) { 5566 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5567 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 5568 PrePostActionTy &Action) { 5569 Action.Enter(CGF); 5570 CGF.EmitOMPTaskLoopBasedDirective(S); 5571 }; 5572 OMPLexicalScope Scope(CGF, S, llvm::None, /*EmitPreInitStmt=*/false); 5573 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 5574 S.getBeginLoc()); 
5575 }; 5576 auto LPCRegion = 5577 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5578 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 5579 emitEmptyBoundParameters); 5580 } 5581 5582 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 5583 const OMPParallelMasterTaskLoopSimdDirective &S) { 5584 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5585 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 5586 PrePostActionTy &Action) { 5587 Action.Enter(CGF); 5588 CGF.EmitOMPTaskLoopBasedDirective(S); 5589 }; 5590 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 5591 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 5592 S.getBeginLoc()); 5593 }; 5594 auto LPCRegion = 5595 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5596 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 5597 emitEmptyBoundParameters); 5598 } 5599 5600 // Generate the instructions for '#pragma omp target update' directive. 5601 void CodeGenFunction::EmitOMPTargetUpdateDirective( 5602 const OMPTargetUpdateDirective &S) { 5603 // If we don't have target devices, don't bother emitting the data mapping 5604 // code. 5605 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5606 return; 5607 5608 // Check if we have any if clause associated with the directive. 5609 const Expr *IfCond = nullptr; 5610 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5611 IfCond = C->getCondition(); 5612 5613 // Check if we have any device clause associated with the directive. 5614 const Expr *Device = nullptr; 5615 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5616 Device = C->getDevice(); 5617 5618 OMPLexicalScope Scope(*this, S, OMPD_task); 5619 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5620 } 5621 5622 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 5623 const OMPExecutableDirective &D) { 5624 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 5625 return; 5626 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 5627 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 5628 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action); 5629 } else { 5630 OMPPrivateScope LoopGlobals(CGF); 5631 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) { 5632 for (const Expr *E : LD->counters()) { 5633 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 5634 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) { 5635 LValue GlobLVal = CGF.EmitLValue(E); 5636 LoopGlobals.addPrivate( 5637 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 5638 } 5639 if (isa<OMPCapturedExprDecl>(VD)) { 5640 // Emit only those that were not explicitly referenced in clauses. 5641 if (!CGF.LocalDeclMap.count(VD)) 5642 CGF.EmitVarDecl(*VD); 5643 } 5644 } 5645 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) { 5646 if (!C->getNumForLoops()) 5647 continue; 5648 for (unsigned I = LD->getCollapsedNumber(), 5649 E = C->getLoopNumIterations().size(); 5650 I < E; ++I) { 5651 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>( 5652 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) { 5653 // Emit only those that were not explicitly referenced in clauses. 
5654 if (!CGF.LocalDeclMap.count(VD)) 5655 CGF.EmitVarDecl(*VD); 5656 } 5657 } 5658 } 5659 } 5660 LoopGlobals.Privatize(); 5661 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt()); 5662 } 5663 }; 5664 { 5665 auto LPCRegion = 5666 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D); 5667 OMPSimdLexicalScope Scope(*this, D); 5668 CGM.getOpenMPRuntime().emitInlinedDirective( 5669 *this, 5670 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd 5671 : D.getDirectiveKind(), 5672 CodeGen); 5673 } 5674 // Check for outer lastprivate conditional update. 5675 checkForLastprivateConditionalUpdate(*this, D); 5676 } 5677