//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Lexical scope for OpenMP executable constructs, that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for OpenMP parallel construct, that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct, that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives, that supports capturing
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(
                          CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit init, __range and __end variables for C++ range loops.
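    // For example, 'for (auto &X : Vec)' is desugared using implicit
    // '__range' and '__end' variables; the loop-bound expressions captured by
    // the directive refer to those hidden declarations, so they have to be
    // emitted here, before the loop precondition is evaluated.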
    const Stmt *Body =
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
          Body, /*TryImperfectlyNestedLoops=*/true);
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
        if (const Stmt *Init = CXXFor->getInit())
          CGF.EmitStmt(Init);
        CGF.EmitStmt(CXXFor->getRangeStmt());
        CGF.EmitStmt(CXXFor->getEndStmt());
        Body = CXXFor->getBody();
      }
    }
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

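// Illustrative example of the capture lowering implemented below: given
//
//   void foo(int N) {
//     double A[N];
//     int X = 0;
//   #pragma omp parallel firstprivate(X)
//     body(A, X);
//   }
//
// 'X' is captured by copy and, since the runtime traffics only in
// pointer-sized values, passed to the outlined function as a uintptr-typed
// argument; 'A' is captured by reference, and the VLA size of 'A' travels as
// an implicit 'vla' argument. GenerateOpenMPCapturedVars collects the
// corresponding llvm::Values for the call to the outlined function.
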
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted to
    // uintptr. This is necessary given that the runtime library is only able to
    // deal with pointers. We can pass in the same way the VLA type sizes to the
    // outlined function.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
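  // For a source type like 'S x[3][4]', this strips both array dimensions so
  // that the copy loop below iterates over all 12 'S' elements linearly; the
  // loop itself is a pointer-bump over 'omp.arraycpy.body' with PHI nodes for
  // the current source and destination elements.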
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI =
      Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // At first check if current thread is a master thread. If it is, no
          // need to copy data.
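          // The master's address and the current thread's address of a
          // threadprivate variable differ exactly on non-master threads, so a
          // single pointer comparison guards the whole copyin sequence.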
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization, it is done in
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                               OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
                                                                        OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If lastprivate variable is a loop control variable for loop-based
        // directive, update its value before copyin back to original
        // variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const Expr *Ref : C->varlists()) {
      Shareds.emplace_back(Ref);
      Privates.emplace_back(*IPriv);
      ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto ILHS = LHSs.begin();
  auto IRHS = RHSs.begin();
  auto IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
            return IsArray
                       ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
          });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
/// 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  if (CGF.getLangOpts().OpenMP < 50)
    return;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  // Privates should not be analyzed since they are not captured at all.
  // Task reductions may be skipped - tasks are ignored.
  // Firstprivates do not return value but may be passed by reference - no need
  // to check for updated lastprivate conditional.
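  // Firstprivates are still recorded in PrivateDecls below, so the final
  // checkAndEmitSharedLastprivateConditional call can skip declarations that
  // are privatized on the directive when scanning the region for updates to
  // shared lastprivate conditional variables.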
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    }
  }
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
      CGF, S, PrivateDecls);
}

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
  // lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    // Check if we have any if clause associated with the directive.
1435 llvm::Value *IfCond = nullptr;
1436 if (const auto *C = S.getSingleClause<OMPIfClause>())
1437 IfCond = EmitScalarExpr(C->getCondition(),
1438 /*IgnoreResultAssign=*/true);
1439
1440 llvm::Value *NumThreads = nullptr;
1441 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1442 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1443 /*IgnoreResultAssign=*/true);
1444
1445 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1446 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1447 ProcBind = ProcBindClause->getProcBindKind();
1448
1449 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1450
1451 // The cleanup callback that finalizes all variables at the given location,
1452 // thus calls destructors etc.
1453 auto FiniCB = [this](InsertPointTy IP) {
1454 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1455 };
1456
1457 // Privatization callback that performs appropriate action for
1458 // shared/private/firstprivate/lastprivate/copyin/... variables.
1459 //
1460 // TODO: This defaults to shared right now.
1461 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1462 llvm::Value &Val, llvm::Value *&ReplVal) {
1463 // The next line is appropriate only for variables (Val) with the
1464 // data-sharing attribute "shared".
1465 ReplVal = &Val;
1466
1467 return CodeGenIP;
1468 };
1469
1470 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1471 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1472
1473 auto BodyGenCB = [ParallelRegionBodyStmt,
1474 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1475 llvm::BasicBlock &ContinuationBB) {
1476 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
1477 ContinuationBB);
1478 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
1479 CodeGenIP, ContinuationBB);
1480 };
1481
1482 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1483 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1484 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
1485 FiniCB, IfCond, NumThreads,
1486 ProcBind, S.hasCancel()));
1487 return;
1488 }
1489
1490 // Emit parallel region as a standalone region.
1491 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1492 Action.Enter(CGF);
1493 OMPPrivateScope PrivateScope(CGF);
1494 bool Copyins = CGF.EmitOMPCopyinClause(S);
1495 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1496 if (Copyins) {
1497 // Emit implicit barrier to synchronize threads and avoid data races on
1498 // propagation of the master thread's values of threadprivate variables to
1499 // the local instances of those variables in all other implicit threads.
1500 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1501 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1502 /*ForceSimpleCall=*/true);
1503 }
1504 CGF.EmitOMPPrivateClause(S, PrivateScope);
1505 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1506 (void)PrivateScope.Privatize();
1507 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1508 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1509 };
1510 {
1511 auto LPCRegion =
1512 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1513 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1514 emitEmptyBoundParameters);
1515 emitPostUpdateForReductionClause(*this, S,
1516 [](CodeGenFunction &) { return nullptr; });
1517 }
1518 // Check for outer lastprivate conditional update.
1519 checkForLastprivateConditionalUpdate(*this, S);
1520 }
1521
1522 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1523 int MaxLevel, int Level = 0) {
1524 assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1525 const Stmt *SimplifiedS = S->IgnoreContainers();
1526 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1527 PrettyStackTraceLoc CrashInfo(
1528 CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1529 "LLVM IR generation of compound statement ('{}')");
1530
1531 // Keep track of the current cleanup stack depth, including debug scopes.
1532 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
1533 for (const Stmt *CurStmt : CS->body())
1534 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
1535 return;
1536 }
1537 if (SimplifiedS == NextLoop) {
1538 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
1539 S = For->getBody();
1540 } else {
1541 assert(isa<CXXForRangeStmt>(SimplifiedS) &&
1542 "Expected canonical for loop or range-based for loop.");
1543 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
1544 CGF.EmitStmt(CXXFor->getLoopVarStmt());
1545 S = CXXFor->getBody();
1546 }
1547 if (Level + 1 < MaxLevel) {
1548 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
1549 S, /*TryImperfectlyNestedLoops=*/true);
1550 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
1551 return;
1552 }
1553 }
1554 CGF.EmitStmt(S);
1555 }
1556
1557 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1558 JumpDest LoopExit) {
1559 RunCleanupsScope BodyScope(*this);
1560 // Update the counter values on the current iteration.
1561 for (const Expr *UE : D.updates())
1562 EmitIgnoredExpr(UE);
1563 // Update the linear variables.
1564 // In distribute directives only loop counters may be marked as linear, no
1565 // need to generate the code for them.
1566 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1567 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1568 for (const Expr *UE : C->updates())
1569 EmitIgnoredExpr(UE);
1570 }
1571 }
1572
1573 // On a continue in the body, jump to the end.
1574 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1575 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1576 for (const Expr *E : D.finals_conditions()) {
1577 if (!E)
1578 continue;
1579 // Check that the loop counter in non-rectangular nest fits into the
1580 // iteration space.
1581 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
1582 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
1583 getProfileCount(D.getBody()));
1584 EmitBlock(NextBB);
1585 }
1586 // Emit loop variables for C++ range loops.
1587 const Stmt *Body =
1588 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
1589 // Emit loop body.
1590 emitBody(*this, Body,
1591 OMPLoopDirective::tryToFindNextInnerLoop(
1592 Body, /*TryImperfectlyNestedLoops=*/true),
1593 D.getCollapsedNumber());
1594
1595 // The end (updates/cleanups).
1596 EmitBlock(Continue.getBlock());
1597 BreakContinueStack.pop_back();
1598 }
1599
1600 void CodeGenFunction::EmitOMPInnerLoop(
1601 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
1602 const Expr *IncExpr,
1603 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
1604 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
1605 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
1606
1607 // Start the loop with a block that tests the condition.
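// The emitted control flow is roughly (using the block names created below):
//   omp.inner.for.cond:  branch on LoopCond to omp.inner.for.body or to the
//                        exit (via omp.inner.for.cond.cleanup if cleanups
//                        are required)
//   omp.inner.for.body:  BodyGen(...)
//   omp.inner.for.inc:   IV = IV + 1; PostIncGen(...); back edge to
//                        omp.inner.for.cond
//   omp.inner.for.end:   fall-through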
1608 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1609 EmitBlock(CondBlock); 1610 const SourceRange R = S.getSourceRange(); 1611 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1612 SourceLocToDebugLoc(R.getEnd())); 1613 1614 // If there are any cleanups between here and the loop-exit scope, 1615 // create a block to stage a loop exit along. 1616 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1617 if (RequiresCleanup) 1618 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1619 1620 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1621 1622 // Emit condition. 1623 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1624 if (ExitBlock != LoopExit.getBlock()) { 1625 EmitBlock(ExitBlock); 1626 EmitBranchThroughCleanup(LoopExit); 1627 } 1628 1629 EmitBlock(LoopBody); 1630 incrementProfileCounter(&S); 1631 1632 // Create a block for the increment. 1633 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1634 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1635 1636 BodyGen(*this); 1637 1638 // Emit "IV = IV + 1" and a back-edge to the condition block. 1639 EmitBlock(Continue.getBlock()); 1640 EmitIgnoredExpr(IncExpr); 1641 PostIncGen(*this); 1642 BreakContinueStack.pop_back(); 1643 EmitBranch(CondBlock); 1644 LoopStack.pop(); 1645 // Emit the fall-through block. 1646 EmitBlock(LoopExit.getBlock()); 1647 } 1648 1649 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1650 if (!HaveInsertPoint()) 1651 return false; 1652 // Emit inits for the linear variables. 1653 bool HasLinears = false; 1654 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1655 for (const Expr *Init : C->inits()) { 1656 HasLinears = true; 1657 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1658 if (const auto *Ref = 1659 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1660 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1661 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1662 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1663 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1664 VD->getInit()->getType(), VK_LValue, 1665 VD->getInit()->getExprLoc()); 1666 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1667 VD->getType()), 1668 /*capturedByInit=*/false); 1669 EmitAutoVarCleanups(Emission); 1670 } else { 1671 EmitVarDecl(*VD); 1672 } 1673 } 1674 // Emit the linear steps for the linear clauses. 1675 // If a step is not constant, it is pre-calculated before the loop. 1676 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1677 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1678 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1679 // Emit calculation of the linear step. 1680 EmitIgnoredExpr(CS); 1681 } 1682 } 1683 return HasLinears; 1684 } 1685 1686 void CodeGenFunction::EmitOMPLinearClauseFinal( 1687 const OMPLoopDirective &D, 1688 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1689 if (!HaveInsertPoint()) 1690 return; 1691 llvm::BasicBlock *DoneBB = nullptr; 1692 // Emit the final values of the linear variables. 1693 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1694 auto IC = C->varlist_begin(); 1695 for (const Expr *F : C->finals()) { 1696 if (!DoneBB) { 1697 if (llvm::Value *Cond = CondGen(*this)) { 1698 // If the first post-update expression is found, emit conditional 1699 // block if it was requested. 
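// The conditionally emitted region below copies each linear variable's final
// value back to its original storage; CondGen typically yields a check such
// as 'IsLastIter != 0' (see EmitOMPWorksharingLoop), so the copy-back runs
// only in the thread that executed the last iteration.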
1700 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1701 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1702 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1703 EmitBlock(ThenBB); 1704 } 1705 } 1706 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1707 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1708 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1709 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1710 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1711 CodeGenFunction::OMPPrivateScope VarScope(*this); 1712 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1713 (void)VarScope.Privatize(); 1714 EmitIgnoredExpr(F); 1715 ++IC; 1716 } 1717 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1718 EmitIgnoredExpr(PostUpdate); 1719 } 1720 if (DoneBB) 1721 EmitBlock(DoneBB, /*IsFinished=*/true); 1722 } 1723 1724 static void emitAlignedClause(CodeGenFunction &CGF, 1725 const OMPExecutableDirective &D) { 1726 if (!CGF.HaveInsertPoint()) 1727 return; 1728 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1729 llvm::APInt ClauseAlignment(64, 0); 1730 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1731 auto *AlignmentCI = 1732 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1733 ClauseAlignment = AlignmentCI->getValue(); 1734 } 1735 for (const Expr *E : Clause->varlists()) { 1736 llvm::APInt Alignment(ClauseAlignment); 1737 if (Alignment == 0) { 1738 // OpenMP [2.8.1, Description] 1739 // If no optional parameter is specified, implementation-defined default 1740 // alignments for SIMD instructions on the target platforms are assumed. 1741 Alignment = 1742 CGF.getContext() 1743 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1744 E->getType()->getPointeeType())) 1745 .getQuantity(); 1746 } 1747 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1748 "alignment is not power of 2"); 1749 if (Alignment != 0) { 1750 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1751 CGF.emitAlignmentAssumption( 1752 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1753 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1754 } 1755 } 1756 } 1757 } 1758 1759 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1760 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1761 if (!HaveInsertPoint()) 1762 return; 1763 auto I = S.private_counters().begin(); 1764 for (const Expr *E : S.counters()) { 1765 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1766 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1767 // Emit var without initialization. 1768 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1769 EmitAutoVarCleanups(VarEmission); 1770 LocalDeclMap.erase(PrivateVD); 1771 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1772 return VarEmission.getAllocatedAddress(); 1773 }); 1774 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1775 VD->hasGlobalStorage()) { 1776 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1777 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1778 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1779 E->getType(), VK_LValue, E->getExprLoc()); 1780 return EmitLValue(&DRE).getAddress(*this); 1781 }); 1782 } else { 1783 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1784 return VarEmission.getAllocatedAddress(); 1785 }); 1786 } 1787 ++I; 1788 } 1789 // Privatize extra loop counters used in loops for ordered(n) clauses. 
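// For example (hypothetical user code):
//   int j;
//   #pragma omp for ordered(2)
//   for (int i = 0; i < n; ++i)
//     for (j = 0; j < m; ++j) { ... }
// Here 'i' is the single collapsed counter handled above, while 'j' is only
// covered by ordered(2); since 'j' is declared outside the loop nest it is
// captured and receives a private temporary below.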
1790 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
1791 if (!C->getNumForLoops())
1792 continue;
1793 for (unsigned I = S.getCollapsedNumber(),
1794 E = C->getLoopNumIterations().size();
1795 I < E; ++I) {
1796 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
1797 const auto *VD = cast<VarDecl>(DRE->getDecl());
1798 // Override only those variables that can be captured to avoid re-emission
1799 // of the variables declared within the loops.
1800 if (DRE->refersToEnclosingVariableOrCapture()) {
1801 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
1802 return CreateMemTemp(DRE->getType(), VD->getName());
1803 });
1804 }
1805 }
1806 }
1807 }
1808
1809 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
1810 const Expr *Cond, llvm::BasicBlock *TrueBlock,
1811 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
1812 if (!CGF.HaveInsertPoint())
1813 return;
1814 {
1815 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
1816 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
1817 (void)PreCondScope.Privatize();
1818 // Get initial values of real counters.
1819 for (const Expr *I : S.inits()) {
1820 CGF.EmitIgnoredExpr(I);
1821 }
1822 }
1823 // Create temp loop control variables with their init values to support
1824 // non-rectangular loops.
1825 CodeGenFunction::OMPMapVars PreCondVars;
1826 for (const Expr *E : S.dependent_counters()) {
1827 if (!E)
1828 continue;
1829 assert(!E->getType().getNonReferenceType()->isRecordType() &&
1830 "dependent counter must not be an iterator.");
1831 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1832 Address CounterAddr =
1833 CGF.CreateMemTemp(VD->getType().getNonReferenceType());
1834 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
1835 }
1836 (void)PreCondVars.apply(CGF);
1837 for (const Expr *E : S.dependent_inits()) {
1838 if (!E)
1839 continue;
1840 CGF.EmitIgnoredExpr(E);
1841 }
1842 // Check that the loop is executed at least once.
1843 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
1844 PreCondVars.restore(CGF);
1845 }
1846
1847 void CodeGenFunction::EmitOMPLinearClause(
1848 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
1849 if (!HaveInsertPoint())
1850 return;
1851 llvm::DenseSet<const VarDecl *> SIMDLCVs;
1852 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
1853 const auto *LoopDirective = cast<OMPLoopDirective>(&D);
1854 for (const Expr *C : LoopDirective->counters()) {
1855 SIMDLCVs.insert(
1856 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
1857 }
1858 }
1859 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1860 auto CurPrivate = C->privates().begin();
1861 for (const Expr *E : C->varlists()) {
1862 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
1863 const auto *PrivateVD =
1864 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
1865 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
1866 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
1867 // Emit private VarDecl with copy init.
1868 EmitVarDecl(*PrivateVD);
1869 return GetAddrOfLocalVar(PrivateVD);
1870 });
1871 assert(IsRegistered && "linear var already registered as private");
1872 // Silence the warning about unused variable.
1873 (void)IsRegistered; 1874 } else { 1875 EmitVarDecl(*PrivateVD); 1876 } 1877 ++CurPrivate; 1878 } 1879 } 1880 } 1881 1882 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 1883 const OMPExecutableDirective &D, 1884 bool IsMonotonic) { 1885 if (!CGF.HaveInsertPoint()) 1886 return; 1887 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 1888 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 1889 /*ignoreResult=*/true); 1890 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 1891 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 1892 // In presence of finite 'safelen', it may be unsafe to mark all 1893 // the memory instructions parallel, because loop-carried 1894 // dependences of 'safelen' iterations are possible. 1895 if (!IsMonotonic) 1896 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 1897 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 1898 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 1899 /*ignoreResult=*/true); 1900 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 1901 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 1902 // In presence of finite 'safelen', it may be unsafe to mark all 1903 // the memory instructions parallel, because loop-carried 1904 // dependences of 'safelen' iterations are possible. 1905 CGF.LoopStack.setParallel(/*Enable=*/false); 1906 } 1907 } 1908 1909 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 1910 bool IsMonotonic) { 1911 // Walk clauses and process safelen/lastprivate. 1912 LoopStack.setParallel(!IsMonotonic); 1913 LoopStack.setVectorizeEnable(); 1914 emitSimdlenSafelenClause(*this, D, IsMonotonic); 1915 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 1916 if (C->getKind() == OMPC_ORDER_concurrent) 1917 LoopStack.setParallel(/*Enable=*/true); 1918 } 1919 1920 void CodeGenFunction::EmitOMPSimdFinal( 1921 const OMPLoopDirective &D, 1922 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1923 if (!HaveInsertPoint()) 1924 return; 1925 llvm::BasicBlock *DoneBB = nullptr; 1926 auto IC = D.counters().begin(); 1927 auto IPC = D.private_counters().begin(); 1928 for (const Expr *F : D.finals()) { 1929 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 1930 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 1931 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 1932 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 1933 OrigVD->hasGlobalStorage() || CED) { 1934 if (!DoneBB) { 1935 if (llvm::Value *Cond = CondGen(*this)) { 1936 // If the first post-update expression is found, emit conditional 1937 // block if it was requested. 
1938 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 1939 DoneBB = createBasicBlock(".omp.final.done"); 1940 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1941 EmitBlock(ThenBB); 1942 } 1943 } 1944 Address OrigAddr = Address::invalid(); 1945 if (CED) { 1946 OrigAddr = 1947 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 1948 } else { 1949 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 1950 /*RefersToEnclosingVariableOrCapture=*/false, 1951 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 1952 OrigAddr = EmitLValue(&DRE).getAddress(*this); 1953 } 1954 OMPPrivateScope VarScope(*this); 1955 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1956 (void)VarScope.Privatize(); 1957 EmitIgnoredExpr(F); 1958 } 1959 ++IC; 1960 ++IPC; 1961 } 1962 if (DoneBB) 1963 EmitBlock(DoneBB, /*IsFinished=*/true); 1964 } 1965 1966 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 1967 const OMPLoopDirective &S, 1968 CodeGenFunction::JumpDest LoopExit) { 1969 CGF.EmitOMPLoopBody(S, LoopExit); 1970 CGF.EmitStopPoint(&S); 1971 } 1972 1973 /// Emit a helper variable and return corresponding lvalue. 1974 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 1975 const DeclRefExpr *Helper) { 1976 auto VDecl = cast<VarDecl>(Helper->getDecl()); 1977 CGF.EmitVarDecl(*VDecl); 1978 return CGF.EmitLValue(Helper); 1979 } 1980 1981 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 1982 const RegionCodeGenTy &SimdInitGen, 1983 const RegionCodeGenTy &BodyCodeGen) { 1984 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 1985 PrePostActionTy &) { 1986 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 1987 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 1988 SimdInitGen(CGF); 1989 1990 BodyCodeGen(CGF); 1991 }; 1992 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 1993 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 1994 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 1995 1996 BodyCodeGen(CGF); 1997 }; 1998 const Expr *IfCond = nullptr; 1999 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2000 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2001 if (CGF.getLangOpts().OpenMP >= 50 && 2002 (C->getNameModifier() == OMPD_unknown || 2003 C->getNameModifier() == OMPD_simd)) { 2004 IfCond = C->getCondition(); 2005 break; 2006 } 2007 } 2008 } 2009 if (IfCond) { 2010 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2011 } else { 2012 RegionCodeGenTy ThenRCG(ThenGen); 2013 ThenRCG(CGF); 2014 } 2015 } 2016 2017 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2018 PrePostActionTy &Action) { 2019 Action.Enter(CGF); 2020 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2021 "Expected simd directive"); 2022 OMPLoopScope PreInitScope(CGF, S); 2023 // if (PreCond) { 2024 // for (IV in 0..LastIteration) BODY; 2025 // <Final counter/linear vars updates>; 2026 // } 2027 // 2028 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2029 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2030 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2031 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2032 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2033 } 2034 2035 // Emit: if (PreCond) - begin. 2036 // If the condition constant folds and can be elided, avoid emitting the 2037 // whole loop. 
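// E.g. for a canonical loop 'for (int i = 0; i < n; ++i)' the precondition
// is roughly '0 < n': if 'n' folds to a positive constant the guard
// disappears, and if it folds to a non-positive constant the whole simd
// region is skipped.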
2038 bool CondConstant; 2039 llvm::BasicBlock *ContBlock = nullptr; 2040 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2041 if (!CondConstant) 2042 return; 2043 } else { 2044 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2045 ContBlock = CGF.createBasicBlock("simd.if.end"); 2046 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2047 CGF.getProfileCount(&S)); 2048 CGF.EmitBlock(ThenBlock); 2049 CGF.incrementProfileCounter(&S); 2050 } 2051 2052 // Emit the loop iteration variable. 2053 const Expr *IVExpr = S.getIterationVariable(); 2054 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2055 CGF.EmitVarDecl(*IVDecl); 2056 CGF.EmitIgnoredExpr(S.getInit()); 2057 2058 // Emit the iterations count variable. 2059 // If it is not a variable, Sema decided to calculate iterations count on 2060 // each iteration (e.g., it is foldable into a constant). 2061 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2062 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2063 // Emit calculation of the iterations count. 2064 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2065 } 2066 2067 emitAlignedClause(CGF, S); 2068 (void)CGF.EmitOMPLinearClauseInit(S); 2069 { 2070 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2071 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2072 CGF.EmitOMPLinearClause(S, LoopScope); 2073 CGF.EmitOMPPrivateClause(S, LoopScope); 2074 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2075 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2076 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2077 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2078 (void)LoopScope.Privatize(); 2079 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2080 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2081 2082 emitCommonSimdLoop( 2083 CGF, S, 2084 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2085 CGF.EmitOMPSimdInit(S); 2086 }, 2087 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2088 CGF.EmitOMPInnerLoop( 2089 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2090 [&S](CodeGenFunction &CGF) { 2091 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest()); 2092 CGF.EmitStopPoint(&S); 2093 }, 2094 [](CodeGenFunction &) {}); 2095 }); 2096 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2097 // Emit final copy of the lastprivate variables at the end of loops. 2098 if (HasLastprivateClause) 2099 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2100 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2101 emitPostUpdateForReductionClause(CGF, S, 2102 [](CodeGenFunction &) { return nullptr; }); 2103 } 2104 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2105 // Emit: if (PreCond) - end. 2106 if (ContBlock) { 2107 CGF.EmitBranch(ContBlock); 2108 CGF.EmitBlock(ContBlock, true); 2109 } 2110 } 2111 2112 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2113 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2114 emitOMPSimdRegion(CGF, S, Action); 2115 }; 2116 { 2117 auto LPCRegion = 2118 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2119 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2120 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2121 } 2122 // Check for outer lastprivate conditional update. 
2123 checkForLastprivateConditionalUpdate(*this, S); 2124 } 2125 2126 void CodeGenFunction::EmitOMPOuterLoop( 2127 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2128 CodeGenFunction::OMPPrivateScope &LoopScope, 2129 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2130 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2131 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2132 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2133 2134 const Expr *IVExpr = S.getIterationVariable(); 2135 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2136 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2137 2138 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2139 2140 // Start the loop with a block that tests the condition. 2141 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2142 EmitBlock(CondBlock); 2143 const SourceRange R = S.getSourceRange(); 2144 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2145 SourceLocToDebugLoc(R.getEnd())); 2146 2147 llvm::Value *BoolCondVal = nullptr; 2148 if (!DynamicOrOrdered) { 2149 // UB = min(UB, GlobalUB) or 2150 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2151 // 'distribute parallel for') 2152 EmitIgnoredExpr(LoopArgs.EUB); 2153 // IV = LB 2154 EmitIgnoredExpr(LoopArgs.Init); 2155 // IV < UB 2156 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2157 } else { 2158 BoolCondVal = 2159 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2160 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2161 } 2162 2163 // If there are any cleanups between here and the loop-exit scope, 2164 // create a block to stage a loop exit along. 2165 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2166 if (LoopScope.requiresCleanups()) 2167 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2168 2169 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2170 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2171 if (ExitBlock != LoopExit.getBlock()) { 2172 EmitBlock(ExitBlock); 2173 EmitBranchThroughCleanup(LoopExit); 2174 } 2175 EmitBlock(LoopBody); 2176 2177 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2178 // LB for loop condition and emitted it above). 2179 if (DynamicOrOrdered) 2180 EmitIgnoredExpr(LoopArgs.Init); 2181 2182 // Create a block for the increment. 2183 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2184 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2185 2186 emitCommonSimdLoop( 2187 *this, S, 2188 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2189 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2190 // with dynamic/guided scheduling and without ordered clause. 2191 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2192 CGF.LoopStack.setParallel(!IsMonotonic); 2193 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2194 if (C->getKind() == OMPC_ORDER_concurrent) 2195 CGF.LoopStack.setParallel(/*Enable=*/true); 2196 } else { 2197 CGF.EmitOMPSimdInit(S, IsMonotonic); 2198 } 2199 }, 2200 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2201 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2202 SourceLocation Loc = S.getBeginLoc(); 2203 // when 'distribute' is not combined with a 'for': 2204 // while (idx <= UB) { BODY; ++idx; } 2205 // when 'distribute' is combined with a 'for' 2206 // (e.g. 
'distribute parallel for') 2207 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2208 CGF.EmitOMPInnerLoop( 2209 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2210 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2211 CodeGenLoop(CGF, S, LoopExit); 2212 }, 2213 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2214 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2215 }); 2216 }); 2217 2218 EmitBlock(Continue.getBlock()); 2219 BreakContinueStack.pop_back(); 2220 if (!DynamicOrOrdered) { 2221 // Emit "LB = LB + Stride", "UB = UB + Stride". 2222 EmitIgnoredExpr(LoopArgs.NextLB); 2223 EmitIgnoredExpr(LoopArgs.NextUB); 2224 } 2225 2226 EmitBranch(CondBlock); 2227 LoopStack.pop(); 2228 // Emit the fall-through block. 2229 EmitBlock(LoopExit.getBlock()); 2230 2231 // Tell the runtime we are done. 2232 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2233 if (!DynamicOrOrdered) 2234 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2235 S.getDirectiveKind()); 2236 }; 2237 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2238 } 2239 2240 void CodeGenFunction::EmitOMPForOuterLoop( 2241 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2242 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2243 const OMPLoopArguments &LoopArgs, 2244 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2245 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2246 2247 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2248 const bool DynamicOrOrdered = 2249 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2250 2251 assert((Ordered || 2252 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2253 LoopArgs.Chunk != nullptr)) && 2254 "static non-chunked schedule does not need outer loop"); 2255 2256 // Emit outer loop. 2257 // 2258 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2259 // When schedule(dynamic,chunk_size) is specified, the iterations are 2260 // distributed to threads in the team in chunks as the threads request them. 2261 // Each thread executes a chunk of iterations, then requests another chunk, 2262 // until no chunks remain to be distributed. Each chunk contains chunk_size 2263 // iterations, except for the last chunk to be distributed, which may have 2264 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2265 // 2266 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2267 // to threads in the team in chunks as the executing threads request them. 2268 // Each thread executes a chunk of iterations, then requests another chunk, 2269 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2270 // each chunk is proportional to the number of unassigned iterations divided 2271 // by the number of threads in the team, decreasing to 1. For a chunk_size 2272 // with value k (greater than 1), the size of each chunk is determined in the 2273 // same way, with the restriction that the chunks do not contain fewer than k 2274 // iterations (except for the last chunk to be assigned, which may have fewer 2275 // than k iterations). 2276 // 2277 // When schedule(auto) is specified, the decision regarding scheduling is 2278 // delegated to the compiler and/or runtime system. The programmer gives the 2279 // implementation the freedom to choose any possible mapping of iterations to 2280 // threads in the team. 
2281 //
2282 // When schedule(runtime) is specified, the decision regarding scheduling is
2283 // deferred until run time, and the schedule and chunk size are taken from the
2284 // run-sched-var ICV. If the ICV is set to auto, the schedule is
2285 // implementation defined.
2286 //
2287 // while(__kmpc_dispatch_next(&LB, &UB)) {
2288 // idx = LB;
2289 // while (idx <= UB) { BODY; ++idx;
2290 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
2291 // } // inner loop
2292 // }
2293 //
2294 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2295 // When schedule(static, chunk_size) is specified, iterations are divided into
2296 // chunks of size chunk_size, and the chunks are assigned to the threads in
2297 // the team in a round-robin fashion in the order of the thread number.
2298 //
2299 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
2300 // while (idx <= UB) { BODY; ++idx; } // inner loop
2301 // LB = LB + ST;
2302 // UB = UB + ST;
2303 // }
2304 //
2305
2306 const Expr *IVExpr = S.getIterationVariable();
2307 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2308 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2309
2310 if (DynamicOrOrdered) {
2311 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
2312 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
2313 llvm::Value *LBVal = DispatchBounds.first;
2314 llvm::Value *UBVal = DispatchBounds.second;
2315 CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
2316 LoopArgs.Chunk};
2317 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
2318 IVSigned, Ordered, DispatchRTInputValues);
2319 } else {
2320 CGOpenMPRuntime::StaticRTInput StaticInit(
2321 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
2322 LoopArgs.ST, LoopArgs.Chunk);
2323 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
2324 ScheduleKind, StaticInit);
2325 }
2326
2327 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
2328 const unsigned IVSize,
2329 const bool IVSigned) {
2330 if (Ordered) {
2331 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
2332 IVSigned);
2333 }
2334 };
2335
2336 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
2337 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
2338 OuterLoopArgs.IncExpr = S.getInc();
2339 OuterLoopArgs.Init = S.getInit();
2340 OuterLoopArgs.Cond = S.getCond();
2341 OuterLoopArgs.NextLB = S.getNextLowerBound();
2342 OuterLoopArgs.NextUB = S.getNextUpperBound();
2343 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
2344 emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
2345 }
2346
2347 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
2348 const unsigned IVSize, const bool IVSigned) {}
2349
2350 void CodeGenFunction::EmitOMPDistributeOuterLoop(
2351 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2352 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2353 const CodeGenLoopTy &CodeGenLoopContent) {
2354
2355 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2356
2357 // Emit outer loop.
2358 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
2359 // dynamic.
2360 //
2361
2362 const Expr *IVExpr = S.getIterationVariable();
2363 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2364 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2365
2366 CGOpenMPRuntime::StaticRTInput StaticInit(
2367 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2368 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2369 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2370
2371 // For combined 'distribute' and 'for', the increment expression of distribute
2372 // is stored in DistInc. For 'distribute' alone, it is in Inc.
2373 Expr *IncExpr;
2374 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2375 IncExpr = S.getDistInc();
2376 else
2377 IncExpr = S.getInc();
2378
2379 // This routine is shared by 'omp distribute parallel for' and
2380 // 'omp distribute': select the right EUB expression depending on the
2381 // directive.
2382 OMPLoopArguments OuterLoopArgs;
2383 OuterLoopArgs.LB = LoopArgs.LB;
2384 OuterLoopArgs.UB = LoopArgs.UB;
2385 OuterLoopArgs.ST = LoopArgs.ST;
2386 OuterLoopArgs.IL = LoopArgs.IL;
2387 OuterLoopArgs.Chunk = LoopArgs.Chunk;
2388 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2389 ? S.getCombinedEnsureUpperBound()
2390 : S.getEnsureUpperBound();
2391 OuterLoopArgs.IncExpr = IncExpr;
2392 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2393 ? S.getCombinedInit()
2394 : S.getInit();
2395 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2396 ? S.getCombinedCond()
2397 : S.getCond();
2398 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2399 ? S.getCombinedNextLowerBound()
2400 : S.getNextLowerBound();
2401 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2402 ? S.getCombinedNextUpperBound()
2403 : S.getNextUpperBound();
2404
2405 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2406 LoopScope, OuterLoopArgs, CodeGenLoopContent,
2407 emitEmptyOrdered);
2408 }
2409
2410 static std::pair<LValue, LValue>
2411 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2412 const OMPExecutableDirective &S) {
2413 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2414 LValue LB =
2415 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2416 LValue UB =
2417 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2418
2419 // When composing 'distribute' with 'for' (e.g. as in 'distribute
2420 // parallel for') we need to use the 'distribute'
2421 // chunk lower and upper bounds rather than the whole loop iteration
2422 // space. These are parameters to the outlined function for 'parallel'
2423 // and we copy the bounds of the previous schedule into
2424 // the current ones.
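// In pseudo-code, the effect of the stores below is:
//   LB = (IVType)PrevLB;  // start of this team's 'distribute' chunk
//   UB = (IVType)PrevUB;  // end of this team's 'distribute' chunk
// so the inner 'for' worksharing only schedules iterations of the chunk
// assigned to the current team.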
2425 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2426 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2427 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2428 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2429 PrevLBVal = CGF.EmitScalarConversion(
2430 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2431 LS.getIterationVariable()->getType(),
2432 LS.getPrevLowerBoundVariable()->getExprLoc());
2433 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2434 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2435 PrevUBVal = CGF.EmitScalarConversion(
2436 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2437 LS.getIterationVariable()->getType(),
2438 LS.getPrevUpperBoundVariable()->getExprLoc());
2439
2440 CGF.EmitStoreOfScalar(PrevLBVal, LB);
2441 CGF.EmitStoreOfScalar(PrevUBVal, UB);
2442
2443 return {LB, UB};
2444 }
2445
2446 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
2447 /// we need to use the LB and UB expressions generated by the worksharing
2448 /// code generation support, whereas in non-combined situations we would
2449 /// just emit 0 and the LastIteration expression.
2450 /// This function is necessary due to the difference of the LB and UB
2451 /// types for the RT emission routines for 'for_static_init' and
2452 /// 'for_dispatch_init'.
2453 static std::pair<llvm::Value *, llvm::Value *>
2454 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2455 const OMPExecutableDirective &S,
2456 Address LB, Address UB) {
2457 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2458 const Expr *IVExpr = LS.getIterationVariable();
2459 // When implementing a dynamic schedule for a 'for' combined with a
2460 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2461 // is not normalized as each team only executes its own assigned
2462 // distribute chunk.
2463 QualType IteratorTy = IVExpr->getType();
2464 llvm::Value *LBVal =
2465 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2466 llvm::Value *UBVal =
2467 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2468 return {LBVal, UBVal};
2469 }
2470
2471 static void emitDistributeParallelForDistributeInnerBoundParams(
2472 CodeGenFunction &CGF, const OMPExecutableDirective &S,
2473 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2474 const auto &Dir = cast<OMPLoopDirective>(S);
2475 LValue LB =
2476 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2477 llvm::Value *LBCast =
2478 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
2479 CGF.SizeTy, /*isSigned=*/false);
2480 CapturedVars.push_back(LBCast);
2481 LValue UB =
2482 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2483
2484 llvm::Value *UBCast =
2485 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
2486 CGF.SizeTy, /*isSigned=*/false);
2487 CapturedVars.push_back(UBCast);
2488 }
2489
2490 static void
2491 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2492 const OMPLoopDirective &S,
2493 CodeGenFunction::JumpDest LoopExit) {
2494 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2495 PrePostActionTy &Action) {
2496 Action.Enter(CGF);
2497 bool HasCancel = false;
2498 if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2499 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2500 HasCancel = D->hasCancel();
2501 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2502 HasCancel = 
D->hasCancel(); 2503 else if (const auto *D = 2504 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2505 HasCancel = D->hasCancel(); 2506 } 2507 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2508 HasCancel); 2509 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2510 emitDistributeParallelForInnerBounds, 2511 emitDistributeParallelForDispatchBounds); 2512 }; 2513 2514 emitCommonOMPParallelDirective( 2515 CGF, S, 2516 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2517 CGInlinedWorksharingLoop, 2518 emitDistributeParallelForDistributeInnerBoundParams); 2519 } 2520 2521 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2522 const OMPDistributeParallelForDirective &S) { 2523 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2524 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2525 S.getDistInc()); 2526 }; 2527 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2528 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2529 } 2530 2531 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2532 const OMPDistributeParallelForSimdDirective &S) { 2533 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2534 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2535 S.getDistInc()); 2536 }; 2537 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2538 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2539 } 2540 2541 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2542 const OMPDistributeSimdDirective &S) { 2543 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2544 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2545 }; 2546 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2547 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2548 } 2549 2550 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2551 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2552 // Emit SPMD target parallel for region as a standalone region. 2553 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2554 emitOMPSimdRegion(CGF, S, Action); 2555 }; 2556 llvm::Function *Fn; 2557 llvm::Constant *Addr; 2558 // Emit target region as a standalone region. 2559 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 2560 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 2561 assert(Fn && Addr && "Target device function emission failed."); 2562 } 2563 2564 void CodeGenFunction::EmitOMPTargetSimdDirective( 2565 const OMPTargetSimdDirective &S) { 2566 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2567 emitOMPSimdRegion(CGF, S, Action); 2568 }; 2569 emitCommonOMPTargetDirective(*this, S, CodeGen); 2570 } 2571 2572 namespace { 2573 struct ScheduleKindModifiersTy { 2574 OpenMPScheduleClauseKind Kind; 2575 OpenMPScheduleClauseModifier M1; 2576 OpenMPScheduleClauseModifier M2; 2577 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2578 OpenMPScheduleClauseModifier M1, 2579 OpenMPScheduleClauseModifier M2) 2580 : Kind(Kind), M1(M1), M2(M2) {} 2581 }; 2582 } // namespace 2583 2584 bool CodeGenFunction::EmitOMPWorksharingLoop( 2585 const OMPLoopDirective &S, Expr *EUB, 2586 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2587 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2588 // Emit the loop iteration variable. 
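// The iteration variable here is the normalized counter created by Sema
// (typically named '.omp.iv'); the user-visible loop counters are later
// recomputed from it by the update expressions emitted in EmitOMPLoopBody.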
2589 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2590 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2591 EmitVarDecl(*IVDecl); 2592 2593 // Emit the iterations count variable. 2594 // If it is not a variable, Sema decided to calculate iterations count on each 2595 // iteration (e.g., it is foldable into a constant). 2596 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2597 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2598 // Emit calculation of the iterations count. 2599 EmitIgnoredExpr(S.getCalcLastIteration()); 2600 } 2601 2602 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2603 2604 bool HasLastprivateClause; 2605 // Check pre-condition. 2606 { 2607 OMPLoopScope PreInitScope(*this, S); 2608 // Skip the entire loop if we don't meet the precondition. 2609 // If the condition constant folds and can be elided, avoid emitting the 2610 // whole loop. 2611 bool CondConstant; 2612 llvm::BasicBlock *ContBlock = nullptr; 2613 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2614 if (!CondConstant) 2615 return false; 2616 } else { 2617 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2618 ContBlock = createBasicBlock("omp.precond.end"); 2619 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2620 getProfileCount(&S)); 2621 EmitBlock(ThenBlock); 2622 incrementProfileCounter(&S); 2623 } 2624 2625 RunCleanupsScope DoacrossCleanupScope(*this); 2626 bool Ordered = false; 2627 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2628 if (OrderedClause->getNumForLoops()) 2629 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2630 else 2631 Ordered = true; 2632 } 2633 2634 llvm::DenseSet<const Expr *> EmittedFinals; 2635 emitAlignedClause(*this, S); 2636 bool HasLinears = EmitOMPLinearClauseInit(S); 2637 // Emit helper vars inits. 2638 2639 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2640 LValue LB = Bounds.first; 2641 LValue UB = Bounds.second; 2642 LValue ST = 2643 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2644 LValue IL = 2645 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2646 2647 // Emit 'then' code. 2648 { 2649 OMPPrivateScope LoopScope(*this); 2650 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2651 // Emit implicit barrier to synchronize threads and avoid data races on 2652 // initialization of firstprivate variables and post-update of 2653 // lastprivate variables. 2654 CGM.getOpenMPRuntime().emitBarrierCall( 2655 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2656 /*ForceSimpleCall=*/true); 2657 } 2658 EmitOMPPrivateClause(S, LoopScope); 2659 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2660 *this, S, EmitLValue(S.getIterationVariable())); 2661 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2662 EmitOMPReductionClauseInit(S, LoopScope); 2663 EmitOMPPrivateLoopCounters(S, LoopScope); 2664 EmitOMPLinearClause(S, LoopScope); 2665 (void)LoopScope.Privatize(); 2666 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2667 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2668 2669 // Detect the loop schedule kind and chunk. 
2670 const Expr *ChunkExpr = nullptr; 2671 OpenMPScheduleTy ScheduleKind; 2672 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 2673 ScheduleKind.Schedule = C->getScheduleKind(); 2674 ScheduleKind.M1 = C->getFirstScheduleModifier(); 2675 ScheduleKind.M2 = C->getSecondScheduleModifier(); 2676 ChunkExpr = C->getChunkSize(); 2677 } else { 2678 // Default behaviour for schedule clause. 2679 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 2680 *this, S, ScheduleKind.Schedule, ChunkExpr); 2681 } 2682 bool HasChunkSizeOne = false; 2683 llvm::Value *Chunk = nullptr; 2684 if (ChunkExpr) { 2685 Chunk = EmitScalarExpr(ChunkExpr); 2686 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 2687 S.getIterationVariable()->getType(), 2688 S.getBeginLoc()); 2689 Expr::EvalResult Result; 2690 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 2691 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 2692 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 2693 } 2694 } 2695 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2696 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2697 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 2698 // If the static schedule kind is specified or if the ordered clause is 2699 // specified, and if no monotonic modifier is specified, the effect will 2700 // be as if the monotonic modifier was specified. 2701 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 2702 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 2703 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 2704 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 2705 /* Chunked */ Chunk != nullptr) || 2706 StaticChunkedOne) && 2707 !Ordered) { 2708 JumpDest LoopExit = 2709 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 2710 emitCommonSimdLoop( 2711 *this, S, 2712 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2713 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2714 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 2715 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 2716 if (C->getKind() == OMPC_ORDER_concurrent) 2717 CGF.LoopStack.setParallel(/*Enable=*/true); 2718 } 2719 }, 2720 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 2721 &S, ScheduleKind, LoopExit, 2722 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2723 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2724 // When no chunk_size is specified, the iteration space is divided 2725 // into chunks that are approximately equal in size, and at most 2726 // one chunk is distributed to each thread. Note that the size of 2727 // the chunks is unspecified in this case. 2728 CGOpenMPRuntime::StaticRTInput StaticInit( 2729 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 2730 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 2731 StaticChunkedOne ? 
Chunk : nullptr); 2732 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2733 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 2734 StaticInit); 2735 // UB = min(UB, GlobalUB); 2736 if (!StaticChunkedOne) 2737 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 2738 // IV = LB; 2739 CGF.EmitIgnoredExpr(S.getInit()); 2740 // For unchunked static schedule generate: 2741 // 2742 // while (idx <= UB) { 2743 // BODY; 2744 // ++idx; 2745 // } 2746 // 2747 // For static schedule with chunk one: 2748 // 2749 // while (IV <= PrevUB) { 2750 // BODY; 2751 // IV += ST; 2752 // } 2753 CGF.EmitOMPInnerLoop( 2754 S, LoopScope.requiresCleanups(), 2755 StaticChunkedOne ? S.getCombinedParForInDistCond() 2756 : S.getCond(), 2757 StaticChunkedOne ? S.getDistInc() : S.getInc(), 2758 [&S, LoopExit](CodeGenFunction &CGF) { 2759 CGF.EmitOMPLoopBody(S, LoopExit); 2760 CGF.EmitStopPoint(&S); 2761 }, 2762 [](CodeGenFunction &) {}); 2763 }); 2764 EmitBlock(LoopExit.getBlock()); 2765 // Tell the runtime we are done. 2766 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2767 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2768 S.getDirectiveKind()); 2769 }; 2770 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2771 } else { 2772 const bool IsMonotonic = 2773 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static || 2774 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown || 2775 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 2776 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 2777 // Emit the outer loop, which requests its work chunk [LB..UB] from 2778 // runtime and runs the inner loop to process it. 2779 const OMPLoopArguments LoopArguments( 2780 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 2781 IL.getAddress(*this), Chunk, EUB); 2782 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2783 LoopArguments, CGDispatchBounds); 2784 } 2785 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2786 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 2787 return CGF.Builder.CreateIsNotNull( 2788 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2789 }); 2790 } 2791 EmitOMPReductionClauseFinal( 2792 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2793 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2794 : /*Parallel only*/ OMPD_parallel); 2795 // Emit post-update of the reduction variables if IsLastIter != 0. 2796 emitPostUpdateForReductionClause( 2797 *this, S, [IL, &S](CodeGenFunction &CGF) { 2798 return CGF.Builder.CreateIsNotNull( 2799 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2800 }); 2801 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2802 if (HasLastprivateClause) 2803 EmitOMPLastprivateClauseFinal( 2804 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2805 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 2806 } 2807 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 2808 return CGF.Builder.CreateIsNotNull( 2809 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2810 }); 2811 DoacrossCleanupScope.ForceCleanup(); 2812 // We're now done with the loop, so jump to the continuation block. 2813 if (ContBlock) { 2814 EmitBranch(ContBlock); 2815 EmitBlock(ContBlock, /*IsFinished=*/true); 2816 } 2817 } 2818 return HasLastprivateClause; 2819 } 2820 2821 /// The following two functions generate expressions for the loop lower 2822 /// and upper bounds in case of static and dynamic (dispatch) schedule 2823 /// of the associated 'for' or 'distribute' loop. 
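/// For a plain '#pragma omp for', for instance, emitForLoopBounds emits the
/// lower and upper bound helper variables themselves, while
/// emitDispatchForLoopBounds returns {0, LastIteration} as the values passed
/// to the dispatch-init runtime call.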
2824 static std::pair<LValue, LValue>
2825 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
2826 const auto &LS = cast<OMPLoopDirective>(S);
2827 LValue LB =
2828 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2829 LValue UB =
2830 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2831 return {LB, UB};
2832 }
2833
2834 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
2835 /// consider the lower and upper bound expressions generated by the
2836 /// worksharing loop support, but we use 0 and the iteration space size as
2837 /// constants.
2838 static std::pair<llvm::Value *, llvm::Value *>
2839 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
2840 Address LB, Address UB) {
2841 const auto &LS = cast<OMPLoopDirective>(S);
2842 const Expr *IVExpr = LS.getIterationVariable();
2843 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
2844 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
2845 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
2846 return {LBVal, UBVal};
2847 }
2848
2849 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
2850 bool HasLastprivates = false;
2851 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2852 PrePostActionTy &) {
2853 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
2854 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2855 emitForLoopBounds,
2856 emitDispatchForLoopBounds);
2857 };
2858 {
2859 auto LPCRegion =
2860 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
2861 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2862 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
2863 S.hasCancel());
2864 }
2865
2866 // Emit an implicit barrier at the end.
2867 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
2868 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
2869 // Check for outer lastprivate conditional update.
2870 checkForLastprivateConditionalUpdate(*this, S);
2871 }
2872
2873 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
2874 bool HasLastprivates = false;
2875 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
2876 PrePostActionTy &) {
2877 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
2878 emitForLoopBounds,
2879 emitDispatchForLoopBounds);
2880 };
2881 {
2882 auto LPCRegion =
2883 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
2884 OMPLexicalScope Scope(*this, S, OMPD_unknown);
2885 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
2886 }
2887
2888 // Emit an implicit barrier at the end.
2889 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
2890 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
2891 // Check for outer lastprivate conditional update.
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
  return LVal;
}

void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, CapturedStmt, CS,
                    &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
    ASTContext &C = CGF.getContext();
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit helper vars inits.
    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                  CGF.Builder.getInt32(0));
    llvm::ConstantInt *GlobalUBVal = CS != nullptr
                                         ? CGF.Builder.getInt32(CS->size() - 1)
                                         : CGF.Builder.getInt32(0);
    LValue UB =
        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                  CGF.Builder.getInt32(1));
    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                  CGF.Builder.getInt32(0));
    // Loop counter.
    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
    OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
    // Generate condition for loop.
    BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                        OK_Ordinary, S.getBeginLoc(), FPOptions());
    // Increment for loop counter.
    UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
                      S.getBeginLoc(), true);
    auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
      // Iterate through all sections and emit a switch construct:
      // switch (IV) {
      //   case 0:
      //     <SectionStmt[0]>;
      //     break;
      //   ...
      //   case <NumSection> - 1:
      //     <SectionStmt[<NumSection> - 1]>;
      //     break;
      // }
      // .omp.sections.exit:
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
      llvm::SwitchInst *SwitchStmt =
          CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
                                   ExitBB, CS == nullptr ? 1 : CS->size());
      if (CS) {
        unsigned CaseNumber = 0;
        for (const Stmt *SubStmt : CS->children()) {
          auto *CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
      } else {
        llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
        CGF.EmitStmt(CapturedStmt);
        CGF.EmitBranch(ExitBB);
      }
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    };

    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and post-update of
      // lastprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    // Emit static non-chunked loop.
    OpenMPScheduleTy ScheduleKind;
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGOpenMPRuntime::StaticRTInput StaticInit(
        /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
        LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
        CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
    // UB = min(UB, GlobalUB);
    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    // IV = LB;
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
    // while (idx <= UB) { BODY; ++idx; }
    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                         [](CodeGenFunction &) {});
    // Tell the runtime we are done.
    auto &&CodeGen = [&S](CodeGenFunction &CGF) {
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
    };
    CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
    // Emit post-update of the reduction variables if IsLastIter != 0.
    emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });

    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivates)
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*NoFinals=*/false,
          CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
  };

  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
                                              HasCancel);
  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
  // clause. Otherwise the barrier will be generated by the codegen for the
  // directive.
  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_unknown);
  }
}
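// Illustrative sketch (an assumption about the shape of the emitted code,
// not literal output): a construct such as
//
//   #pragma omp sections
//   {
//     #pragma omp section
//       foo();
//     #pragma omp section
//       bar();
//   }
//
// is lowered by EmitSections above into a statically scheduled loop over the
// section index IV with a switch dispatching to each section body, roughly
//
//   for (IV = LB; IV <= UB; ++IV)
//     switch (IV) { case 0: foo(); break; case 1: bar(); break; }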
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    EmitSections(S);
  }
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>()) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_sections);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
                                              S.hasCancel());
}
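// Illustrative note on 'single' with 'copyprivate' (a sketch of the intended
// semantics, not literal emitted code): for
//
//   #pragma omp single copyprivate(x)
//     x = compute();
//
// the thread that executes the region broadcasts its value of 'x' to the
// copies in all other threads via the runtime, using the helper
// <destination> = <source> assignment expressions collected below.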
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions)
  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  // Emit code for 'single' region along with 'copyprivate' clauses
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope SingleScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
                                            CopyprivateVars, DestExprs,
                                            SrcExprs, AssignmentOps);
  }
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getBeginLoc(),
        S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt();

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP,
                                                  llvm::BasicBlock &FiniBB) {
      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
                                             CodeGenIP, FiniBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    Builder.restoreIP(OMPBuilder->CreateMaster(Builder, BodyGenCB, FiniCB));

    return;
  }
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  emitMaster(*this, S);
}
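// Illustrative note (an assumption about the usual lowering, not literal
// output): '#pragma omp critical (name) [hint(...)]' below is emitted as a
// runtime-guarded region over a lock global named after the directive,
// roughly
//
//   __kmpc_critical(&loc, tid, &.gomp_critical_user_<name>.var);
//   <body>;
//   __kmpc_end_critical(&loc, tid, &.gomp_critical_user_<name>.var);
//
// with the hint expression, when present, forwarded to the hinted entry
// point instead.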
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt();
    const Expr *Hint = nullptr;
    if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
      Hint = HintClause->getHint();

    // TODO: This is slightly different from what's currently being done in
    // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
    // about typing is final.
    llvm::Value *HintInst = nullptr;
    if (Hint)
      HintInst =
          Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                    InsertPointTy CodeGenIP,
                                                    llvm::BasicBlock &FiniBB) {
      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
                                             CodeGenIP, FiniBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    Builder.restoreIP(OMPBuilder->CreateCritical(
        Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
        HintInst));

    return;
  }

  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  const Expr *Hint = nullptr;
  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getBeginLoc(), Hint);
}

void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}
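// Illustrative sketch for the combined emitters in this file (an assumed
// shape, not literal output): '#pragma omp parallel for' forks an outlined
// function via the runtime and runs the worksharing loop inside it, roughly
//
//   __kmpc_fork_call(&loc, <nargs>, .omp_outlined., ...); // 'parallel' part
//   // inside .omp_outlined.: the static/dynamic 'for' loop over [LB, UB]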
void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelMasterDirective(
    const OMPParallelMasterDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'master' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // propagation of the master thread's values of threadprivate variables
      // to the local instances of those variables in all other implicit
      // threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    emitMaster(CGF, S);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitSections(S);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPTaskBasedDirective(
    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
    OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  // Check if the task is final.
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    const Expr *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
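  // Note (explanatory, added for clarity): Data.Final is a pointer/int pair,
  // so it carries either a compile-time boolean (when the 'final' condition
  // folds to a constant) or a runtime i1 value; the task emission code later
  // consumes whichever side was set above.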
  // Check if the task has 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(/*IntVal=*/true);
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied task).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef;
      ++IElemInitRef;
    }
  }
  // Get list of lastprivate variables (for taskloops).
  llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
      }
      LastprivateDstsOrigs.insert(
          {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
           cast<DeclRefExpr>(*IRef)});
      ++IRef;
      ++ID;
    }
  }
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const Expr *Ref : C->varlists()) {
      Data.ReductionVars.emplace_back(Ref);
      Data.ReductionCopies.emplace_back(*IPriv);
      Data.ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
      *this, S.getBeginLoc(), LHSs, RHSs, Data);
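  // Note (explanatory, added for clarity): emitTaskReductionInit returns the
  // taskgroup reduction descriptor for the clauses gathered above; the task
  // body emitted below retrieves each participant's private copy from that
  // descriptor through getTaskReductionItem.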
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
    for (const Expr *IRef : C->varlists())
      Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
                    CapturedRegion](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.PrivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        FirstprivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.LastprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : LastprivateDstsOrigs) {
        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
        DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        Pair.second->getType(), VK_LValue,
                        Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    if (Data.Reductions) {
      OMPPrivateScope FirstprivateScope(CGF);
      for (const auto &Pair : FirstprivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        FirstprivateScope.addPrivate(Pair.first,
                                     [Replacement]() { return Replacement; });
      }
      (void)FirstprivateScope.Privatize();
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
                             Data.ReductionOps);
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement =
            Address(CGF.EmitScalarConversion(
                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                        CGF.getContext().getPointerType(
                            Data.ReductionCopies[Cnt]->getType()),
                        Data.ReductionCopies[Cnt]->getExprLoc()),
                    Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt),
                         [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicitly firstprivate
        // and has already been privatized while processing the firstprivates.
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        llvm::Value *ReductionsPtr =
            CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
                                 TaskgroupDescriptors[Cnt]->getExprLoc());
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
                              [Replacement]() { return Replacement; });
      }
    }
    (void)InRedScope.Privatize();

    Action.Enter(CGF);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S, llvm::None,
                        !isOpenMPParallelDirective(S.getDirectiveKind()) &&
                            !isOpenMPSimdDirective(S.getDirectiveKind()));
  TaskGen(*this, OutlinedFn, Data);
}

static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
                                  QualType Ty, CapturedDecl *CD,
                                  SourceLocation Loc) {
  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                           ImplicitParamDecl::Other);
  auto *OrigRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                              ImplicitParamDecl::Other);
  auto *PrivateRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  QualType ElemType = C.getBaseElementType(Ty);
  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
                                           ImplicitParamDecl::Other);
  auto *InitRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
  PrivateVD->setInitStyle(VarDecl::CInit);
  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
                                              InitRef, /*BasePath=*/nullptr,
                                              VK_RValue));
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivateRef);
  Data.FirstprivateInits.emplace_back(InitRef);
  return OrigVD;
}
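// Note (explanatory, added for clarity): EmitOMPTargetTaskBasedDirective
// below models the base-pointer, pointer and size arrays of a target task as
// implicit firstprivates built with the helper above, so the generated task
// owns private copies of the mapping data it needs after the encountering
// thread has moved on.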
void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
    OMPTargetDataInfo &InputInfo) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  OMPTaskDataTy Data;
  // The task is not final.
  Data.Final.setInt(/*IntVal=*/false);
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
      ++IRef;
      ++IElemInitRef;
    }
  }
  OMPPrivateScope TargetScope(*this);
  VarDecl *BPVD = nullptr;
  VarDecl *PVD = nullptr;
  VarDecl *SVD = nullptr;
  if (InputInfo.NumberOfTargetItems > 0) {
    auto *CD = CapturedDecl::Create(
        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
    QualType BaseAndPointersType = getContext().getConstantArrayType(
        getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    BPVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
    PVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
    QualType SizesType = getContext().getConstantArrayType(
        getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
        ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
                                            S.getBeginLoc());
    TargetScope.addPrivate(
        BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
    TargetScope.addPrivate(PVD,
                           [&InputInfo]() { return InputInfo.PointersArray; });
    TargetScope.addPrivate(SVD,
                           [&InputInfo]() { return InputInfo.SizesArray; });
  }
  (void)TargetScope.Privatize();
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>())
    for (const Expr *IRef : C->varlists())
      Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    if (InputInfo.NumberOfTargetItems > 0) {
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
    }

    Action.Enter(CGF);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
      Data.NumberOfParts);
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
  IntegerLiteral IfCond(getContext(), TrueOrFalse,
                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
                        SourceLocation());

  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
                                      SharedsTy, CapturedStruct, &IfCond, Data);
}
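// Illustrative note for EmitOMPTaskDirective below (an assumed runtime shape,
// not literal output): a tied '#pragma omp task' allocates and enqueues a
// task object, roughly
//
//   kmp_task_t *T = __kmpc_omp_task_alloc(&loc, tid, flags, ...,
//                                         .omp_task_entry.);
//   __kmpc_omp_task(&loc, tid, T);
//
// while an 'if' clause that evaluates to false forces an immediate,
// undeferred execution path instead.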
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if we should emit tied or untied task.
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
                                            Data);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    if (const Expr *E = S.getReductionRef()) {
      SmallVector<const Expr *, 4> LHSs;
      SmallVector<const Expr *, 4> RHSs;
      OMPTaskDataTy Data;
      for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
        auto IPriv = C->privates().begin();
        auto IRed = C->reduction_ops().begin();
        auto ILHS = C->lhs_exprs().begin();
        auto IRHS = C->rhs_exprs().begin();
        for (const Expr *Ref : C->varlists()) {
          Data.ReductionVars.emplace_back(Ref);
          Data.ReductionCopies.emplace_back(*IPriv);
          Data.ReductionOps.emplace_back(*IRed);
          LHSs.emplace_back(*ILHS);
          RHSs.emplace_back(*IRHS);
          std::advance(IPriv, 1);
          std::advance(IRed, 1);
          std::advance(ILHS, 1);
          std::advance(IRHS, 1);
        }
      }
      llvm::Value *ReductionDesc =
          CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
                                                           LHSs, RHSs, Data);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      CGF.EmitVarDecl(*VD);
      CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
                            /*Volatile=*/false, E->getType());
    }
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}
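// Note (explanatory, added for clarity): per the code below, a 'flush' with
// a list is emitted with NotAtomic ordering (a plain strong flush over the
// listed locations), while a list-less 'flush' defaults to acquire-release
// ordering.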
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
                                ? llvm::AtomicOrdering::NotAtomic
                                : llvm::AtomicOrdering::AcquireRelease;
  CGM.getOpenMPRuntime().emitFlush(
      *this,
      [&S]() -> ArrayRef<const Expr *> {
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
          return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                    FlushClause->varlist_end());
        return llvm::None;
      }(),
      S.getBeginLoc(), AO);
}

void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
  const auto *DO = S.getSingleClause<OMPDepobjClause>();
  LValue DOLVal = EmitLValue(DO->getDepobj());
  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
    SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4>
        Dependencies;
    for (const Expr *IRef : DC->varlists())
      Dependencies.emplace_back(DC->getDependencyKind(), IRef);
    Address DepAddr = CGM.getOpenMPRuntime().emitDependClause(
        *this, Dependencies, /*ForDepobj=*/true, DC->getBeginLoc()).second;
    EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
    return;
  }
  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
    CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
    return;
  }
  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
    CGM.getOpenMPRuntime().emitUpdateClause(
        *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
    return;
  }
}
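// Illustrative note for EmitOMPDistributeLoop below (a sketch of the intended
// semantics, not literal output): 'distribute' partitions the iteration
// space across the teams of a league, so with a static dist_schedule each
// team ends up running, roughly,
//
//   for (IV = team_LB; IV <= team_UB; ++IV) BODY;
//
// where team_LB/team_UB are produced by the runtime's distribute-static init
// call.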
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
                                            const CodeGenLoopTy &CodeGenLoop,
                                            Expr *IncExpr) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on
  // each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause = false;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.

      LValue LB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedLowerBoundVariable()
                          : S.getLowerBoundVariable())));
      LValue UB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedUpperBoundVariable()
                          : S.getUpperBoundVariable())));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind()))
        EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const Expr *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getBeginLoc());
        }
      } else {
        // Default behaviour for dist_schedule clause.
        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
            *this, S, ScheduleKind, Chunk);
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      bool StaticChunked =
          RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) ||
          StaticChunked) {
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            StaticChunked ? Chunk : nullptr);
        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                    StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedInit()
                            : S.getInit());

        const Expr *Cond =
            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                ? S.getCombinedCond()
                : S.getCond();

        if (StaticChunked)
          Cond = S.getCombinedDistCond();

        // For static unchunked schedules generate:
        //
        // 1. For distribute alone, codegen
        //    while (idx <= UB) {
        //      BODY;
        //      ++idx;
        //    }
        //
        // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
        //    while (idx <= UB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      idx += ST;
        //    }
        //
        // For static chunk one schedule generate:
        //
        // while (IV <= GlobalUB) {
        //   <CodeGen rest of pragma>(LB, UB);
        //   LB += ST;
        //   UB += ST;
        //   UB = min(UB, GlobalUB);
        //   IV = LB;
        // }
        //
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind()))
                CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
            },
            [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
             StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(), Cond, IncExpr,
                  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
                    CodeGenLoop(CGF, S, LoopExit);
                  },
                  [&S, StaticChunked](CodeGenFunction &CGF) {
                    if (StaticChunked) {
                      CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                      CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedInit());
                    }
                  });
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // the runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments = {
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk};
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                   CodeGenLoop);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
        EmitOMPReductionClauseFinal(S, OMPD_simd);
        // Emit post-update of the reduction variables if IsLastIter != 0.
        emitPostUpdateForReductionClause(
            *this, S, [IL, &S](CodeGenFunction &CGF) {
              return CGF.Builder.CreateIsNotNull(
                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
            });
      }
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause) {
        EmitOMPLastprivateClauseFinal(
            S, /*NoFinals=*/false,
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      }
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, /*IsFinished=*/true);
    }
  }
}

void CodeGenFunction::EmitOMPDistributeDirective(
    const OMPDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S,
                                                   SourceLocation Loc) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  CGF.CapturedStmtInfo = &CapStmtInfo;
  llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
  Fn->setDoesNotRecurse();
  return Fn;
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  if (S.hasClausesOfKind<OMPDependClause>()) {
    assert(!S.getAssociatedStmt() &&
           "No associated statement must be in ordered depend construct.");
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
    return;
  }
  const auto *C = S.getSingleClause<OMPSIMDClause>();
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
                                 PrePostActionTy &Action) {
    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    if (C) {
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      llvm::Function *OutlinedFn =
          emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
                                                      OutlinedFn, CapturedVars);
    } else {
      Action.Enter(CGF);
      CGF.EmitStmt(CS->getCapturedStmt());
    }
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
}
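// Note (explanatory, added for clarity): the helpers below support 'omp
// atomic' codegen. An atomic capture 'v = x;' may need a type conversion
// between the in-memory form of 'x' and the declared type of 'v'; these
// routines perform that conversion for scalar and complex values.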
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                                   DestType, Loc)
                        : CGF.EmitComplexToScalarConversion(
                              Val.getComplexVal(), SrcType, DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    llvm::Value *ScalarVal = CGF.EmitScalarConversion(
        Val.getScalarVal(), SrcType, DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg())
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  else
    CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
}

static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, LValue LVal,
                                   SourceLocation Loc) {
  if (LVal.isGlobalReg())
    return CGF.EmitLoadOfLValue(LVal, Loc);
  return CGF.EmitAtomicLoad(
      LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
      LVal.isVolatile());
}

void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
                                         QualType RValTy, SourceLocation Loc) {
  switch (getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                               *this, RVal, RValTy, LVal.getType(), Loc)),
                           LVal);
    break;
  case TEK_Complex:
    EmitStoreOfComplex(
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}
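// Illustrative note (an assumed mapping, not literal output): for
// '#pragma omp atomic read acquire' on 'v = x;', emitOMPAtomicReadExpr below
// performs an atomic load of 'x' and, per the flush rule cited in its body,
// follows it with an acquire flush before storing the result into 'v'.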
static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
  // OpenMP, 2.17.7, atomic Construct
  // If the read or capture clause is specified and the acquire, acq_rel, or
  // seq_cst clause is specified then the strong flush on exit from the atomic
  // operation is also an acquire flush.
  switch (AO) {
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Acquire);
    break;
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::Release:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
}

static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, const Expr *X,
                                   const Expr *E, SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}
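// Illustrative note (an assumed lowering, not literal output): when the
// checks in emitOMPAtomicRMW below succeed, an update such as 'x += 1' on a
// 32-bit integer becomes a single instruction, roughly
//
//   %old = atomicrmw add i32* %x, i32 1 monotonic
//
// otherwise codegen falls back to a compare-and-swap loop (or a plain
// load/store sequence for register globals).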
4291                                    : llvm::AtomicRMWInst::UMax);
4292     break;
4293   case BO_GT:
4294     RMWOp = X.getType()->hasSignedIntegerRepresentation()
4295                 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
4296                                    : llvm::AtomicRMWInst::Min)
4297                 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
4298                                    : llvm::AtomicRMWInst::UMin);
4299     break;
4300   case BO_Assign:
4301     RMWOp = llvm::AtomicRMWInst::Xchg;
4302     break;
4303   case BO_Mul:
4304   case BO_Div:
4305   case BO_Rem:
4306   case BO_Shl:
4307   case BO_Shr:
4308   case BO_LAnd:
4309   case BO_LOr:
4310     return std::make_pair(false, RValue::get(nullptr));
4311   case BO_PtrMemD:
4312   case BO_PtrMemI:
4313   case BO_LE:
4314   case BO_GE:
4315   case BO_EQ:
4316   case BO_NE:
4317   case BO_Cmp:
4318   case BO_AddAssign:
4319   case BO_SubAssign:
4320   case BO_AndAssign:
4321   case BO_OrAssign:
4322   case BO_XorAssign:
4323   case BO_MulAssign:
4324   case BO_DivAssign:
4325   case BO_RemAssign:
4326   case BO_ShlAssign:
4327   case BO_ShrAssign:
4328   case BO_Comma:
4329     llvm_unreachable("Unsupported atomic update operation");
4330   }
4331   llvm::Value *UpdateVal = Update.getScalarVal();
4332   if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
4333     UpdateVal = CGF.Builder.CreateIntCast(
4334         IC, X.getAddress(CGF).getElementType(),
4335         X.getType()->hasSignedIntegerRepresentation());
4336   }
4337   llvm::Value *Res =
4338       CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
4339   return std::make_pair(true, RValue::get(Res));
4340 }
4341
4342 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
4343     LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
4344     llvm::AtomicOrdering AO, SourceLocation Loc,
4345     const llvm::function_ref<RValue(RValue)> CommonGen) {
4346   // Update expressions are allowed to have the following forms:
4347   //  x binop= expr; -> xrval binop expr;
4348   //  x++, ++x -> xrval + 1;
4349   //  x--, --x -> xrval - 1;
4350   //  x = x binop expr; -> xrval binop expr
4351   //  x = expr binop x; -> expr binop xrval;
4352   auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
4353   if (!Res.first) {
4354     if (X.isGlobalReg()) {
4355       // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
4356       // 'xrval'.
4357       EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
4358     } else {
4359       // Perform compare-and-swap procedure.
4360       EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
4361     }
4362   }
4363   return Res;
4364 }
4365
4366 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
4367                                     llvm::AtomicOrdering AO, const Expr *X,
4368                                     const Expr *E, const Expr *UE,
4369                                     bool IsXLHSInRHSPart, SourceLocation Loc) {
4370   assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
4371          "Update expr in 'atomic update' must be a binary operator.");
4372   const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
4373   // Update expressions are allowed to have the following forms:
4374   //  x binop= expr; -> xrval binop expr;
4375   //  x++, ++x -> xrval + 1;
4376   //  x--, --x -> xrval - 1;
4377   //  x = x binop expr; -> xrval binop expr
4378   //  x = expr binop x; -> expr binop xrval;
4379   assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
4380   LValue XLValue = CGF.EmitLValue(X);
4381   RValue ExprRValue = CGF.EmitAnyExpr(E);
4382   const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
4383   const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
4384   const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
4385   const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
4386   auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
4387     CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
4388     CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
4389     return CGF.EmitAnyExpr(UE);
4390   };
4391   (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
4392       XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
4393   CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
4394   // OpenMP, 2.17.7, atomic Construct
4395   // If the write, update, or capture clause is specified and the release,
4396   // acq_rel, or seq_cst clause is specified then the strong flush on entry to
4397   // the atomic operation is also a release flush.
4398   switch (AO) {
4399   case llvm::AtomicOrdering::Release:
4400   case llvm::AtomicOrdering::AcquireRelease:
4401   case llvm::AtomicOrdering::SequentiallyConsistent:
4402     CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
4403                                          llvm::AtomicOrdering::Release);
4404     break;
4405   case llvm::AtomicOrdering::Acquire:
4406   case llvm::AtomicOrdering::Monotonic:
4407     break;
4408   case llvm::AtomicOrdering::NotAtomic:
4409   case llvm::AtomicOrdering::Unordered:
4410     llvm_unreachable("Unexpected ordering.");
4411   }
4412 }
4413
4414 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
4415                             QualType SourceType, QualType ResType,
4416                             SourceLocation Loc) {
4417   switch (CGF.getEvaluationKind(ResType)) {
4418   case TEK_Scalar:
4419     return RValue::get(
4420         convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
4421   case TEK_Complex: {
4422     auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
4423     return RValue::getComplex(Res.first, Res.second);
4424   }
4425   case TEK_Aggregate:
4426     break;
4427   }
4428   llvm_unreachable("Must be a scalar or complex.");
4429 }
4430
4431 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
4432                                      llvm::AtomicOrdering AO,
4433                                      bool IsPostfixUpdate, const Expr *V,
4434                                      const Expr *X, const Expr *E,
4435                                      const Expr *UE, bool IsXLHSInRHSPart,
4436                                      SourceLocation Loc) {
4437   assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
4438   assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
4439   RValue NewVVal;
4440   LValue VLValue = CGF.EmitLValue(V);
4441   LValue XLValue = CGF.EmitLValue(X);
4442   RValue ExprRValue = CGF.EmitAnyExpr(E);
4443   QualType NewVValType;
4444   if (UE) {
4445     // 'x' is updated with some additional value.
4446     assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
4447            "Update expr in 'atomic capture' must be a binary operator.");
4448     const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
4449     // Update expressions are allowed to have the following forms:
4450     //  x binop= expr; -> xrval binop expr;
4451     //  x++, ++x -> xrval + 1;
4452     //  x--, --x -> xrval - 1;
4453     //  x = x binop expr; -> xrval binop expr
4454     //  x = expr binop x; -> expr binop xrval;
4455     const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
4456     const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
4457     const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
4458     NewVValType = XRValExpr->getType();
4459     const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ?
RHS : LHS; 4460 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 4461 IsPostfixUpdate](RValue XRValue) { 4462 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4463 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4464 RValue Res = CGF.EmitAnyExpr(UE); 4465 NewVVal = IsPostfixUpdate ? XRValue : Res; 4466 return Res; 4467 }; 4468 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 4469 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4470 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4471 if (Res.first) { 4472 // 'atomicrmw' instruction was generated. 4473 if (IsPostfixUpdate) { 4474 // Use old value from 'atomicrmw'. 4475 NewVVal = Res.second; 4476 } else { 4477 // 'atomicrmw' does not provide new value, so evaluate it using old 4478 // value of 'x'. 4479 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4480 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 4481 NewVVal = CGF.EmitAnyExpr(UE); 4482 } 4483 } 4484 } else { 4485 // 'x' is simply rewritten with some 'expr'. 4486 NewVValType = X->getType().getNonReferenceType(); 4487 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 4488 X->getType().getNonReferenceType(), Loc); 4489 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 4490 NewVVal = XRValue; 4491 return ExprRValue; 4492 }; 4493 // Try to perform atomicrmw xchg, otherwise simple exchange. 4494 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 4495 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 4496 Loc, Gen); 4497 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4498 if (Res.first) { 4499 // 'atomicrmw' instruction was generated. 4500 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 4501 } 4502 } 4503 // Emit post-update store to 'v' of old/new 'x' value. 4504 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 4505 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4506 // OpenMP, 2.17.7, atomic Construct 4507 // If the write, update, or capture clause is specified and the release, 4508 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4509 // the atomic operation is also a release flush. 4510 // If the read or capture clause is specified and the acquire, acq_rel, or 4511 // seq_cst clause is specified then the strong flush on exit from the atomic 4512 // operation is also an acquire flush. 
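  // An illustrative sketch (assuming an integer 'x'):
  //   #pragma omp atomic capture seq_cst
  //   v = x++;
  // can lower to an 'atomicrmw add' whose result is the old value of 'x';
  // 'v' receives that old value, and the seq_cst clause adds the
  // acquire-release flush selected below.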
4513 switch (AO) { 4514 case llvm::AtomicOrdering::Release: 4515 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4516 llvm::AtomicOrdering::Release); 4517 break; 4518 case llvm::AtomicOrdering::Acquire: 4519 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4520 llvm::AtomicOrdering::Acquire); 4521 break; 4522 case llvm::AtomicOrdering::AcquireRelease: 4523 case llvm::AtomicOrdering::SequentiallyConsistent: 4524 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4525 llvm::AtomicOrdering::AcquireRelease); 4526 break; 4527 case llvm::AtomicOrdering::Monotonic: 4528 break; 4529 case llvm::AtomicOrdering::NotAtomic: 4530 case llvm::AtomicOrdering::Unordered: 4531 llvm_unreachable("Unexpected ordering."); 4532 } 4533 } 4534 4535 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 4536 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 4537 const Expr *X, const Expr *V, const Expr *E, 4538 const Expr *UE, bool IsXLHSInRHSPart, 4539 SourceLocation Loc) { 4540 switch (Kind) { 4541 case OMPC_read: 4542 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 4543 break; 4544 case OMPC_write: 4545 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 4546 break; 4547 case OMPC_unknown: 4548 case OMPC_update: 4549 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 4550 break; 4551 case OMPC_capture: 4552 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 4553 IsXLHSInRHSPart, Loc); 4554 break; 4555 case OMPC_if: 4556 case OMPC_final: 4557 case OMPC_num_threads: 4558 case OMPC_private: 4559 case OMPC_firstprivate: 4560 case OMPC_lastprivate: 4561 case OMPC_reduction: 4562 case OMPC_task_reduction: 4563 case OMPC_in_reduction: 4564 case OMPC_safelen: 4565 case OMPC_simdlen: 4566 case OMPC_allocator: 4567 case OMPC_allocate: 4568 case OMPC_collapse: 4569 case OMPC_default: 4570 case OMPC_seq_cst: 4571 case OMPC_acq_rel: 4572 case OMPC_acquire: 4573 case OMPC_release: 4574 case OMPC_relaxed: 4575 case OMPC_shared: 4576 case OMPC_linear: 4577 case OMPC_aligned: 4578 case OMPC_copyin: 4579 case OMPC_copyprivate: 4580 case OMPC_flush: 4581 case OMPC_depobj: 4582 case OMPC_proc_bind: 4583 case OMPC_schedule: 4584 case OMPC_ordered: 4585 case OMPC_nowait: 4586 case OMPC_untied: 4587 case OMPC_threadprivate: 4588 case OMPC_depend: 4589 case OMPC_mergeable: 4590 case OMPC_device: 4591 case OMPC_threads: 4592 case OMPC_simd: 4593 case OMPC_map: 4594 case OMPC_num_teams: 4595 case OMPC_thread_limit: 4596 case OMPC_priority: 4597 case OMPC_grainsize: 4598 case OMPC_nogroup: 4599 case OMPC_num_tasks: 4600 case OMPC_hint: 4601 case OMPC_dist_schedule: 4602 case OMPC_defaultmap: 4603 case OMPC_uniform: 4604 case OMPC_to: 4605 case OMPC_from: 4606 case OMPC_use_device_ptr: 4607 case OMPC_is_device_ptr: 4608 case OMPC_unified_address: 4609 case OMPC_unified_shared_memory: 4610 case OMPC_reverse_offload: 4611 case OMPC_dynamic_allocators: 4612 case OMPC_atomic_default_mem_order: 4613 case OMPC_device_type: 4614 case OMPC_match: 4615 case OMPC_nontemporal: 4616 case OMPC_order: 4617 case OMPC_destroy: 4618 case OMPC_detach: 4619 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 4620 } 4621 } 4622 4623 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 4624 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic; 4625 bool MemOrderingSpecified = false; 4626 if (S.getSingleClause<OMPSeqCstClause>()) { 4627 AO = llvm::AtomicOrdering::SequentiallyConsistent; 4628 MemOrderingSpecified = true; 4629 } else if 
(S.getSingleClause<OMPAcqRelClause>()) {
4630     AO = llvm::AtomicOrdering::AcquireRelease;
4631     MemOrderingSpecified = true;
4632   } else if (S.getSingleClause<OMPAcquireClause>()) {
4633     AO = llvm::AtomicOrdering::Acquire;
4634     MemOrderingSpecified = true;
4635   } else if (S.getSingleClause<OMPReleaseClause>()) {
4636     AO = llvm::AtomicOrdering::Release;
4637     MemOrderingSpecified = true;
4638   } else if (S.getSingleClause<OMPRelaxedClause>()) {
4639     AO = llvm::AtomicOrdering::Monotonic;
4640     MemOrderingSpecified = true;
4641   }
4642   OpenMPClauseKind Kind = OMPC_unknown;
4643   for (const OMPClause *C : S.clauses()) {
4644     // Find the first clause (skip the seq_cst|acq_rel|acquire|release|relaxed
4645     // clause, if it is first).
4646     if (C->getClauseKind() != OMPC_seq_cst &&
4647         C->getClauseKind() != OMPC_acq_rel &&
4648         C->getClauseKind() != OMPC_acquire &&
4649         C->getClauseKind() != OMPC_release &&
4650         C->getClauseKind() != OMPC_relaxed) {
4651       Kind = C->getClauseKind();
4652       break;
4653     }
4654   }
4655   if (!MemOrderingSpecified) {
4656     llvm::AtomicOrdering DefaultOrder =
4657         CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
4658     if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
4659         DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
4660         (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
4661          Kind == OMPC_capture)) {
4662       AO = DefaultOrder;
4663     } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
4664       if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
4665         AO = llvm::AtomicOrdering::Release;
4666       } else if (Kind == OMPC_read) {
4667         assert(Kind == OMPC_read && "Unexpected atomic kind.");
4668         AO = llvm::AtomicOrdering::Acquire;
4669       }
4670     }
4671   }
4672
4673   const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
4674   if (const auto *FE = dyn_cast<FullExpr>(CS))
4675     enterFullExpression(FE);
4676   // Processing for statements under 'atomic capture'.
4677   if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
4678     for (const Stmt *C : Compound->body()) {
4679       if (const auto *FE = dyn_cast<FullExpr>(C))
4680         enterFullExpression(FE);
4681     }
4682   }
4683
4684   auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
4685                                       PrePostActionTy &) {
4686     CGF.EmitStopPoint(CS);
4687     emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
4688                       S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
4689                       S.getBeginLoc());
4690   };
4691   OMPLexicalScope Scope(*this, S, OMPD_unknown);
4692   CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
4693 }
4694
4695 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
4696                                          const OMPExecutableDirective &S,
4697                                          const RegionCodeGenTy &CodeGen) {
4698   assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
4699   CodeGenModule &CGM = CGF.CGM;
4700
4701   // On device emit this construct as inlined code.
4702   if (CGM.getLangOpts().OpenMPIsDevice) {
4703     OMPLexicalScope Scope(CGF, S, OMPD_target);
4704     CGM.getOpenMPRuntime().emitInlinedDirective(
4705         CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
4706           CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4707         });
4708     return;
4709   }
4710
4711   auto LPCRegion =
4712       CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
4713   llvm::Function *Fn = nullptr;
4714   llvm::Constant *FnID = nullptr;
4715
4716   const Expr *IfCond = nullptr;
4717   // Check for at most one 'if' clause associated with the target region.
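  // e.g. for '#pragma omp target parallel if(target : cond)' only the
  // 'target'-modified (or unmodified) 'if' clause guards the offload; 'if'
  // clauses naming other constructs are skipped below.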
4718 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4719 if (C->getNameModifier() == OMPD_unknown || 4720 C->getNameModifier() == OMPD_target) { 4721 IfCond = C->getCondition(); 4722 break; 4723 } 4724 } 4725 4726 // Check if we have any device clause associated with the directive. 4727 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device( 4728 nullptr, OMPC_DEVICE_unknown); 4729 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 4730 Device.setPointerAndInt(C->getDevice(), C->getModifier()); 4731 4732 // Check if we have an if clause whose conditional always evaluates to false 4733 // or if we do not have any targets specified. If so the target region is not 4734 // an offload entry point. 4735 bool IsOffloadEntry = true; 4736 if (IfCond) { 4737 bool Val; 4738 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 4739 IsOffloadEntry = false; 4740 } 4741 if (CGM.getLangOpts().OMPTargetTriples.empty()) 4742 IsOffloadEntry = false; 4743 4744 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 4745 StringRef ParentName; 4746 // In case we have Ctors/Dtors we use the complete type variant to produce 4747 // the mangling of the device outlined kernel. 4748 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 4749 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 4750 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 4751 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 4752 else 4753 ParentName = 4754 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 4755 4756 // Emit target region as a standalone region. 4757 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 4758 IsOffloadEntry, CodeGen); 4759 OMPLexicalScope Scope(CGF, S, OMPD_task); 4760 auto &&SizeEmitter = 4761 [IsOffloadEntry](CodeGenFunction &CGF, 4762 const OMPLoopDirective &D) -> llvm::Value * { 4763 if (IsOffloadEntry) { 4764 OMPLoopScope(CGF, D); 4765 // Emit calculation of the iterations count. 4766 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 4767 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 4768 /*isSigned=*/false); 4769 return NumIterations; 4770 } 4771 return nullptr; 4772 }; 4773 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 4774 SizeEmitter); 4775 } 4776 4777 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 4778 PrePostActionTy &Action) { 4779 Action.Enter(CGF); 4780 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4781 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4782 CGF.EmitOMPPrivateClause(S, PrivateScope); 4783 (void)PrivateScope.Privatize(); 4784 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4785 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 4786 4787 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 4788 } 4789 4790 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 4791 StringRef ParentName, 4792 const OMPTargetDirective &S) { 4793 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4794 emitTargetRegion(CGF, S, Action); 4795 }; 4796 llvm::Function *Fn; 4797 llvm::Constant *Addr; 4798 // Emit target region as a standalone region. 
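  // A sketch of the effect: this emits the outlined device function for the
  // target region and, since IsOffloadEntry is true, registers it as an
  // offload entry point keyed by 'ParentName'.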
4799 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4800 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4801 assert(Fn && Addr && "Target device function emission failed."); 4802 } 4803 4804 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 4805 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4806 emitTargetRegion(CGF, S, Action); 4807 }; 4808 emitCommonOMPTargetDirective(*this, S, CodeGen); 4809 } 4810 4811 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 4812 const OMPExecutableDirective &S, 4813 OpenMPDirectiveKind InnermostKind, 4814 const RegionCodeGenTy &CodeGen) { 4815 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 4816 llvm::Function *OutlinedFn = 4817 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 4818 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 4819 4820 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 4821 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 4822 if (NT || TL) { 4823 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 4824 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 4825 4826 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 4827 S.getBeginLoc()); 4828 } 4829 4830 OMPTeamsScope Scope(CGF, S); 4831 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4832 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4833 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 4834 CapturedVars); 4835 } 4836 4837 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 4838 // Emit teams region as a standalone region. 4839 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4840 Action.Enter(CGF); 4841 OMPPrivateScope PrivateScope(CGF); 4842 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4843 CGF.EmitOMPPrivateClause(S, PrivateScope); 4844 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4845 (void)PrivateScope.Privatize(); 4846 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 4847 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4848 }; 4849 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 4850 emitPostUpdateForReductionClause(*this, S, 4851 [](CodeGenFunction &) { return nullptr; }); 4852 } 4853 4854 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 4855 const OMPTargetTeamsDirective &S) { 4856 auto *CS = S.getCapturedStmt(OMPD_teams); 4857 Action.Enter(CGF); 4858 // Emit teams region as a standalone region. 
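  // e.g. for '#pragma omp target teams firstprivate(a) private(b)', the
  // lambda below privatizes 'a' and 'b' before emitting the captured teams
  // body.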
4859 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 4860 Action.Enter(CGF); 4861 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4862 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4863 CGF.EmitOMPPrivateClause(S, PrivateScope); 4864 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4865 (void)PrivateScope.Privatize(); 4866 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4867 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 4868 CGF.EmitStmt(CS->getCapturedStmt()); 4869 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4870 }; 4871 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 4872 emitPostUpdateForReductionClause(CGF, S, 4873 [](CodeGenFunction &) { return nullptr; }); 4874 } 4875 4876 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 4877 CodeGenModule &CGM, StringRef ParentName, 4878 const OMPTargetTeamsDirective &S) { 4879 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4880 emitTargetTeamsRegion(CGF, Action, S); 4881 }; 4882 llvm::Function *Fn; 4883 llvm::Constant *Addr; 4884 // Emit target region as a standalone region. 4885 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4886 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4887 assert(Fn && Addr && "Target device function emission failed."); 4888 } 4889 4890 void CodeGenFunction::EmitOMPTargetTeamsDirective( 4891 const OMPTargetTeamsDirective &S) { 4892 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4893 emitTargetTeamsRegion(CGF, Action, S); 4894 }; 4895 emitCommonOMPTargetDirective(*this, S, CodeGen); 4896 } 4897 4898 static void 4899 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 4900 const OMPTargetTeamsDistributeDirective &S) { 4901 Action.Enter(CGF); 4902 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4903 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4904 }; 4905 4906 // Emit teams region as a standalone region. 4907 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4908 PrePostActionTy &Action) { 4909 Action.Enter(CGF); 4910 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4911 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4912 (void)PrivateScope.Privatize(); 4913 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4914 CodeGenDistribute); 4915 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4916 }; 4917 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 4918 emitPostUpdateForReductionClause(CGF, S, 4919 [](CodeGenFunction &) { return nullptr; }); 4920 } 4921 4922 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 4923 CodeGenModule &CGM, StringRef ParentName, 4924 const OMPTargetTeamsDistributeDirective &S) { 4925 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4926 emitTargetTeamsDistributeRegion(CGF, Action, S); 4927 }; 4928 llvm::Function *Fn; 4929 llvm::Constant *Addr; 4930 // Emit target region as a standalone region. 
4931 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4932 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4933 assert(Fn && Addr && "Target device function emission failed."); 4934 } 4935 4936 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 4937 const OMPTargetTeamsDistributeDirective &S) { 4938 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4939 emitTargetTeamsDistributeRegion(CGF, Action, S); 4940 }; 4941 emitCommonOMPTargetDirective(*this, S, CodeGen); 4942 } 4943 4944 static void emitTargetTeamsDistributeSimdRegion( 4945 CodeGenFunction &CGF, PrePostActionTy &Action, 4946 const OMPTargetTeamsDistributeSimdDirective &S) { 4947 Action.Enter(CGF); 4948 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4949 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4950 }; 4951 4952 // Emit teams region as a standalone region. 4953 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4954 PrePostActionTy &Action) { 4955 Action.Enter(CGF); 4956 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4957 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4958 (void)PrivateScope.Privatize(); 4959 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4960 CodeGenDistribute); 4961 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4962 }; 4963 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 4964 emitPostUpdateForReductionClause(CGF, S, 4965 [](CodeGenFunction &) { return nullptr; }); 4966 } 4967 4968 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 4969 CodeGenModule &CGM, StringRef ParentName, 4970 const OMPTargetTeamsDistributeSimdDirective &S) { 4971 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4972 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 4973 }; 4974 llvm::Function *Fn; 4975 llvm::Constant *Addr; 4976 // Emit target region as a standalone region. 4977 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4978 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4979 assert(Fn && Addr && "Target device function emission failed."); 4980 } 4981 4982 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 4983 const OMPTargetTeamsDistributeSimdDirective &S) { 4984 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4985 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 4986 }; 4987 emitCommonOMPTargetDirective(*this, S, CodeGen); 4988 } 4989 4990 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 4991 const OMPTeamsDistributeDirective &S) { 4992 4993 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4994 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4995 }; 4996 4997 // Emit teams region as a standalone region. 
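  // e.g. for '#pragma omp teams distribute reduction(+ : sum)', each team
  // executes its chunk of the distribute loop and the teams-level reduction
  // emitted below combines the per-team 'sum' copies.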
4998 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4999 PrePostActionTy &Action) { 5000 Action.Enter(CGF); 5001 OMPPrivateScope PrivateScope(CGF); 5002 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5003 (void)PrivateScope.Privatize(); 5004 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5005 CodeGenDistribute); 5006 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5007 }; 5008 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5009 emitPostUpdateForReductionClause(*this, S, 5010 [](CodeGenFunction &) { return nullptr; }); 5011 } 5012 5013 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 5014 const OMPTeamsDistributeSimdDirective &S) { 5015 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5016 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5017 }; 5018 5019 // Emit teams region as a standalone region. 5020 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5021 PrePostActionTy &Action) { 5022 Action.Enter(CGF); 5023 OMPPrivateScope PrivateScope(CGF); 5024 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5025 (void)PrivateScope.Privatize(); 5026 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 5027 CodeGenDistribute); 5028 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5029 }; 5030 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 5031 emitPostUpdateForReductionClause(*this, S, 5032 [](CodeGenFunction &) { return nullptr; }); 5033 } 5034 5035 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 5036 const OMPTeamsDistributeParallelForDirective &S) { 5037 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5038 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5039 S.getDistInc()); 5040 }; 5041 5042 // Emit teams region as a standalone region. 5043 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5044 PrePostActionTy &Action) { 5045 Action.Enter(CGF); 5046 OMPPrivateScope PrivateScope(CGF); 5047 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5048 (void)PrivateScope.Privatize(); 5049 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5050 CodeGenDistribute); 5051 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5052 }; 5053 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 5054 emitPostUpdateForReductionClause(*this, S, 5055 [](CodeGenFunction &) { return nullptr; }); 5056 } 5057 5058 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 5059 const OMPTeamsDistributeParallelForSimdDirective &S) { 5060 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5061 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5062 S.getDistInc()); 5063 }; 5064 5065 // Emit teams region as a standalone region. 
5066 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5067 PrePostActionTy &Action) { 5068 Action.Enter(CGF); 5069 OMPPrivateScope PrivateScope(CGF); 5070 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5071 (void)PrivateScope.Privatize(); 5072 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5073 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5074 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5075 }; 5076 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 5077 CodeGen); 5078 emitPostUpdateForReductionClause(*this, S, 5079 [](CodeGenFunction &) { return nullptr; }); 5080 } 5081 5082 static void emitTargetTeamsDistributeParallelForRegion( 5083 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 5084 PrePostActionTy &Action) { 5085 Action.Enter(CGF); 5086 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5087 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5088 S.getDistInc()); 5089 }; 5090 5091 // Emit teams region as a standalone region. 5092 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5093 PrePostActionTy &Action) { 5094 Action.Enter(CGF); 5095 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5096 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5097 (void)PrivateScope.Privatize(); 5098 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5099 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5100 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5101 }; 5102 5103 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 5104 CodeGenTeams); 5105 emitPostUpdateForReductionClause(CGF, S, 5106 [](CodeGenFunction &) { return nullptr; }); 5107 } 5108 5109 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 5110 CodeGenModule &CGM, StringRef ParentName, 5111 const OMPTargetTeamsDistributeParallelForDirective &S) { 5112 // Emit SPMD target teams distribute parallel for region as a standalone 5113 // region. 5114 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5115 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5116 }; 5117 llvm::Function *Fn; 5118 llvm::Constant *Addr; 5119 // Emit target region as a standalone region. 5120 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5121 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5122 assert(Fn && Addr && "Target device function emission failed."); 5123 } 5124 5125 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 5126 const OMPTargetTeamsDistributeParallelForDirective &S) { 5127 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5128 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5129 }; 5130 emitCommonOMPTargetDirective(*this, S, CodeGen); 5131 } 5132 5133 static void emitTargetTeamsDistributeParallelForSimdRegion( 5134 CodeGenFunction &CGF, 5135 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 5136 PrePostActionTy &Action) { 5137 Action.Enter(CGF); 5138 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5139 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5140 S.getDistInc()); 5141 }; 5142 5143 // Emit teams region as a standalone region. 
5144 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5145 PrePostActionTy &Action) { 5146 Action.Enter(CGF); 5147 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5148 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5149 (void)PrivateScope.Privatize(); 5150 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5151 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5152 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5153 }; 5154 5155 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 5156 CodeGenTeams); 5157 emitPostUpdateForReductionClause(CGF, S, 5158 [](CodeGenFunction &) { return nullptr; }); 5159 } 5160 5161 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 5162 CodeGenModule &CGM, StringRef ParentName, 5163 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5164 // Emit SPMD target teams distribute parallel for simd region as a standalone 5165 // region. 5166 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5167 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5168 }; 5169 llvm::Function *Fn; 5170 llvm::Constant *Addr; 5171 // Emit target region as a standalone region. 5172 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5173 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5174 assert(Fn && Addr && "Target device function emission failed."); 5175 } 5176 5177 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 5178 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5179 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5180 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5181 }; 5182 emitCommonOMPTargetDirective(*this, S, CodeGen); 5183 } 5184 5185 void CodeGenFunction::EmitOMPCancellationPointDirective( 5186 const OMPCancellationPointDirective &S) { 5187 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 5188 S.getCancelRegion()); 5189 } 5190 5191 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 5192 const Expr *IfCond = nullptr; 5193 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5194 if (C->getNameModifier() == OMPD_unknown || 5195 C->getNameModifier() == OMPD_cancel) { 5196 IfCond = C->getCondition(); 5197 break; 5198 } 5199 } 5200 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5201 // TODO: This check is necessary as we only generate `omp parallel` through 5202 // the OpenMPIRBuilder for now. 
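    // e.g. '#pragma omp cancel parallel if(cond)' evaluates 'cond' below and
    // lets the OMPIRBuilder emit the conditional cancellation branch.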
5203     if (S.getCancelRegion() == OMPD_parallel) {
5204       llvm::Value *IfCondition = nullptr;
5205       if (IfCond)
5206         IfCondition = EmitScalarExpr(IfCond,
5207                                      /*IgnoreResultAssign=*/true);
5208       return Builder.restoreIP(
5209           OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
5210     }
5211   }
5212
5213   CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
5214                                         S.getCancelRegion());
5215 }
5216
5217 CodeGenFunction::JumpDest
5218 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
5219   if (Kind == OMPD_parallel || Kind == OMPD_task ||
5220       Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
5221       Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
5222     return ReturnBlock;
5223   assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
5224          Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
5225          Kind == OMPD_distribute_parallel_for ||
5226          Kind == OMPD_target_parallel_for ||
5227          Kind == OMPD_teams_distribute_parallel_for ||
5228          Kind == OMPD_target_teams_distribute_parallel_for);
5229   return OMPCancelStack.getExitBlock();
5230 }
5231
5232 void CodeGenFunction::EmitOMPUseDevicePtrClause(
5233     const OMPClause &NC, OMPPrivateScope &PrivateScope,
5234     const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5235   const auto &C = cast<OMPUseDevicePtrClause>(NC);
5236   auto OrigVarIt = C.varlist_begin();
5237   auto InitIt = C.inits().begin();
5238   for (const Expr *PvtVarIt : C.private_copies()) {
5239     const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
5240     const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
5241     const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
5242
5243     // In order to identify the right initializer we need to match the
5244     // declaration used by the mapping logic. In some cases we may get
5245     // OMPCapturedExprDecl that refers to the original declaration.
5246     const ValueDecl *MatchingVD = OrigVD;
5247     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5248       // OMPCapturedExprDecl are used to privatize fields of the current
5249       // structure.
5250       const auto *ME = cast<MemberExpr>(OED->getInit());
5251       assert(isa<CXXThisExpr>(ME->getBase()) &&
5252              "Base should be the current struct!");
5253       MatchingVD = ME->getMemberDecl();
5254     }
5255
5256     // If we don't have information about the current list item, move on to
5257     // the next one.
5258     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5259     if (InitAddrIt == CaptureDeviceAddrMap.end())
5260       continue;
5261
5262     bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
5263                                                          InitAddrIt, InitVD,
5264                                                          PvtVD]() {
5265       // Initialize the temporary initialization variable with the address we
5266       // get from the runtime library. We have to cast the source address
5267       // because it is always a void *. References are materialized in the
5268       // privatization scope, so the initialization here disregards the fact
5269       // the original variable is a reference.
5270       QualType AddrQTy =
5271           getContext().getPointerType(OrigVD->getType().getNonReferenceType());
5272       llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
5273       Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
5274       setAddrOfLocalVar(InitVD, InitAddr);
5275
5276       // Emit the private declaration; it will be initialized by the
5277       // initialization variable we just added to the local declarations map.
5278       EmitDecl(*PvtVD);
5279
5280       // The initialization variable has served its purpose in the emission
5281       // of the previous declaration, so we don't need it anymore.
5282       LocalDeclMap.erase(InitVD);
5283
5284       // Return the address of the private variable.
5285       return GetAddrOfLocalVar(PvtVD);
5286     });
5287     assert(IsRegistered && "firstprivate var already registered as private");
5288     // Silence the warning about unused variable.
5289     (void)IsRegistered;
5290
5291     ++OrigVarIt;
5292     ++InitIt;
5293   }
5294 }
5295
5296 // Generate the instructions for '#pragma omp target data' directive.
5297 void CodeGenFunction::EmitOMPTargetDataDirective(
5298     const OMPTargetDataDirective &S) {
5299   CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
5300
5301   // Create a pre/post action to signal the privatization of the device
5302   // pointer. This action can be replaced by the OpenMP runtime code
5303   // generation to deactivate privatization.
5304   bool PrivatizeDevicePointers = false;
5305   class DevicePointerPrivActionTy : public PrePostActionTy {
5306     bool &PrivatizeDevicePointers;
5307
5308   public:
5309     explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
5310         : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
5311     void Enter(CodeGenFunction &CGF) override {
5312       PrivatizeDevicePointers = true;
5313     }
5314   };
5315   DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
5316
5317   auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
5318                        CodeGenFunction &CGF, PrePostActionTy &Action) {
5319     auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5320       CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5321     };
5322
5323     // Codegen that selects whether to generate the privatization code or not.
5324     auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
5325                           &InnermostCodeGen](CodeGenFunction &CGF,
5326                                              PrePostActionTy &Action) {
5327       RegionCodeGenTy RCG(InnermostCodeGen);
5328       PrivatizeDevicePointers = false;
5329
5330       // Call the pre-action to change the status of PrivatizeDevicePointers if
5331       // needed.
5332       Action.Enter(CGF);
5333
5334       if (PrivatizeDevicePointers) {
5335         OMPPrivateScope PrivateScope(CGF);
5336         // Emit all instances of the use_device_ptr clause.
5337         for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
5338           CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
5339                                         Info.CaptureDeviceAddrMap);
5340         (void)PrivateScope.Privatize();
5341         RCG(CGF);
5342       } else {
5343         RCG(CGF);
5344       }
5345     };
5346
5347     // Forward the provided action to the privatization codegen.
5348     RegionCodeGenTy PrivRCG(PrivCodeGen);
5349     PrivRCG.setAction(Action);
5350
5351     // Although the body of the region is emitted as an inlined directive, we
5352     // don't use an inline scope, because changes to the references inside the
5353     // region are expected to be visible outside, so we do not privatize them.
5354     OMPLexicalScope Scope(CGF, S);
5355     CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
5356                                                     PrivRCG);
5357   };
5358
5359   RegionCodeGenTy RCG(CodeGen);
5360
5361   // If we don't have target devices, don't bother emitting the data mapping
5362   // code.
5363   if (CGM.getLangOpts().OMPTargetTriples.empty()) {
5364     RCG(*this);
5365     return;
5366   }
5367
5368   // Check if we have any if clause associated with the directive.
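  // e.g. '#pragma omp target data map(tofrom : a) if(cond) device(dev)'
  // forwards 'cond' and 'dev' to the runtime call emitted below.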
5369 const Expr *IfCond = nullptr; 5370 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5371 IfCond = C->getCondition(); 5372 5373 // Check if we have any device clause associated with the directive. 5374 const Expr *Device = nullptr; 5375 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5376 Device = C->getDevice(); 5377 5378 // Set the action to signal privatization of device pointers. 5379 RCG.setAction(PrivAction); 5380 5381 // Emit region code. 5382 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 5383 Info); 5384 } 5385 5386 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 5387 const OMPTargetEnterDataDirective &S) { 5388 // If we don't have target devices, don't bother emitting the data mapping 5389 // code. 5390 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5391 return; 5392 5393 // Check if we have any if clause associated with the directive. 5394 const Expr *IfCond = nullptr; 5395 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5396 IfCond = C->getCondition(); 5397 5398 // Check if we have any device clause associated with the directive. 5399 const Expr *Device = nullptr; 5400 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5401 Device = C->getDevice(); 5402 5403 OMPLexicalScope Scope(*this, S, OMPD_task); 5404 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5405 } 5406 5407 void CodeGenFunction::EmitOMPTargetExitDataDirective( 5408 const OMPTargetExitDataDirective &S) { 5409 // If we don't have target devices, don't bother emitting the data mapping 5410 // code. 5411 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5412 return; 5413 5414 // Check if we have any if clause associated with the directive. 5415 const Expr *IfCond = nullptr; 5416 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5417 IfCond = C->getCondition(); 5418 5419 // Check if we have any device clause associated with the directive. 5420 const Expr *Device = nullptr; 5421 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5422 Device = C->getDevice(); 5423 5424 OMPLexicalScope Scope(*this, S, OMPD_task); 5425 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5426 } 5427 5428 static void emitTargetParallelRegion(CodeGenFunction &CGF, 5429 const OMPTargetParallelDirective &S, 5430 PrePostActionTy &Action) { 5431 // Get the captured statement associated with the 'parallel' region. 5432 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 5433 Action.Enter(CGF); 5434 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5435 Action.Enter(CGF); 5436 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5437 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5438 CGF.EmitOMPPrivateClause(S, PrivateScope); 5439 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5440 (void)PrivateScope.Privatize(); 5441 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5442 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5443 // TODO: Add support for clauses. 
5444     CGF.EmitStmt(CS->getCapturedStmt());
5445     CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
5446   };
5447   emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
5448                                  emitEmptyBoundParameters);
5449   emitPostUpdateForReductionClause(CGF, S,
5450                                    [](CodeGenFunction &) { return nullptr; });
5451 }
5452
5453 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
5454     CodeGenModule &CGM, StringRef ParentName,
5455     const OMPTargetParallelDirective &S) {
5456   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5457     emitTargetParallelRegion(CGF, S, Action);
5458   };
5459   llvm::Function *Fn;
5460   llvm::Constant *Addr;
5461   // Emit target region as a standalone region.
5462   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
5463       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
5464   assert(Fn && Addr && "Target device function emission failed.");
5465 }
5466
5467 void CodeGenFunction::EmitOMPTargetParallelDirective(
5468     const OMPTargetParallelDirective &S) {
5469   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5470     emitTargetParallelRegion(CGF, S, Action);
5471   };
5472   emitCommonOMPTargetDirective(*this, S, CodeGen);
5473 }
5474
5475 static void emitTargetParallelForRegion(CodeGenFunction &CGF,
5476                                         const OMPTargetParallelForDirective &S,
5477                                         PrePostActionTy &Action) {
5478   Action.Enter(CGF);
5479   // Emit the directive as a combined directive consisting of two implicit
5480   // directives: 'parallel' with a worksharing 'for' directive.
5481   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5482     Action.Enter(CGF);
5483     CodeGenFunction::OMPCancelStackRAII CancelRegion(
5484         CGF, OMPD_target_parallel_for, S.hasCancel());
5485     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
5486                                emitDispatchForLoopBounds);
5487   };
5488   emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
5489                                  emitEmptyBoundParameters);
5490 }
5491
5492 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
5493     CodeGenModule &CGM, StringRef ParentName,
5494     const OMPTargetParallelForDirective &S) {
5495   // Emit SPMD target parallel for region as a standalone region.
5496   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5497     emitTargetParallelForRegion(CGF, S, Action);
5498   };
5499   llvm::Function *Fn;
5500   llvm::Constant *Addr;
5501   // Emit target region as a standalone region.
5502   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
5503       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
5504   assert(Fn && Addr && "Target device function emission failed.");
5505 }
5506
5507 void CodeGenFunction::EmitOMPTargetParallelForDirective(
5508     const OMPTargetParallelForDirective &S) {
5509   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5510     emitTargetParallelForRegion(CGF, S, Action);
5511   };
5512   emitCommonOMPTargetDirective(*this, S, CodeGen);
5513 }
5514
5515 static void
5516 emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
5517                                 const OMPTargetParallelForSimdDirective &S,
5518                                 PrePostActionTy &Action) {
5519   Action.Enter(CGF);
5520   // Emit the directive as a combined directive consisting of two implicit
5521   // directives: 'parallel' with a 'for simd' directive.
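  // e.g. '#pragma omp target parallel for simd' reaches this point as a
  // 'parallel' region whose body is the worksharing loop emitted below.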
5522   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5523     Action.Enter(CGF);
5524     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
5525                                emitDispatchForLoopBounds);
5526   };
5527   emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
5528                                  emitEmptyBoundParameters);
5529 }
5530
5531 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
5532     CodeGenModule &CGM, StringRef ParentName,
5533     const OMPTargetParallelForSimdDirective &S) {
5534   // Emit SPMD target parallel for simd region as a standalone region.
5535   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5536     emitTargetParallelForSimdRegion(CGF, S, Action);
5537   };
5538   llvm::Function *Fn;
5539   llvm::Constant *Addr;
5540   // Emit target region as a standalone region.
5541   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
5542       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
5543   assert(Fn && Addr && "Target device function emission failed.");
5544 }
5545
5546 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
5547     const OMPTargetParallelForSimdDirective &S) {
5548   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5549     emitTargetParallelForSimdRegion(CGF, S, Action);
5550   };
5551   emitCommonOMPTargetDirective(*this, S, CodeGen);
5552 }
5553
5554 /// Emit a helper variable and return corresponding lvalue.
5555 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
5556                      const ImplicitParamDecl *PVD,
5557                      CodeGenFunction::OMPPrivateScope &Privates) {
5558   const auto *VDecl = cast<VarDecl>(Helper->getDecl());
5559   Privates.addPrivate(VDecl,
5560                       [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
5561 }
5562
5563 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
5564   assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
5565   // Emit outlined function for task construct.
5566   const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
5567   Address CapturedStruct = Address::invalid();
5568   {
5569     OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
5570     CapturedStruct = GenerateCapturedStmtArgument(*CS);
5571   }
5572   QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
5573   const Expr *IfCond = nullptr;
5574   for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
5575     if (C->getNameModifier() == OMPD_unknown ||
5576         C->getNameModifier() == OMPD_taskloop) {
5577       IfCond = C->getCondition();
5578       break;
5579     }
5580   }
5581
5582   OMPTaskDataTy Data;
5583   // Check if taskloop must be emitted without taskgroup.
5584   Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
5585   // TODO: Check if we should emit tied or untied task.
5586   Data.Tied = true;
5587   // Set scheduling for the taskloop.
5588   if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
5589     // grainsize clause
5590     Data.Schedule.setInt(/*IntVal=*/false);
5591     Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
5592   } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
5593     // num_tasks clause
5594     Data.Schedule.setInt(/*IntVal=*/true);
5595     Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
5596   }
5597
5598   auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
5599     // if (PreCond) {
5600     //   for (IV in 0..LastIteration) BODY;
5601     //   <Final counter/linear vars updates>;
5602     // }
5603     //
5604
5605     // Emit: if (PreCond) - begin.
5606 // If the condition constant folds and can be elided, avoid emitting the 5607 // whole loop. 5608 bool CondConstant; 5609 llvm::BasicBlock *ContBlock = nullptr; 5610 OMPLoopScope PreInitScope(CGF, S); 5611 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 5612 if (!CondConstant) 5613 return; 5614 } else { 5615 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 5616 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 5617 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 5618 CGF.getProfileCount(&S)); 5619 CGF.EmitBlock(ThenBlock); 5620 CGF.incrementProfileCounter(&S); 5621 } 5622 5623 (void)CGF.EmitOMPLinearClauseInit(S); 5624 5625 OMPPrivateScope LoopScope(CGF); 5626 // Emit helper vars inits. 5627 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 5628 auto *I = CS->getCapturedDecl()->param_begin(); 5629 auto *LBP = std::next(I, LowerBound); 5630 auto *UBP = std::next(I, UpperBound); 5631 auto *STP = std::next(I, Stride); 5632 auto *LIP = std::next(I, LastIter); 5633 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 5634 LoopScope); 5635 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 5636 LoopScope); 5637 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 5638 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 5639 LoopScope); 5640 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 5641 CGF.EmitOMPLinearClause(S, LoopScope); 5642 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 5643 (void)LoopScope.Privatize(); 5644 // Emit the loop iteration variable. 5645 const Expr *IVExpr = S.getIterationVariable(); 5646 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 5647 CGF.EmitVarDecl(*IVDecl); 5648 CGF.EmitIgnoredExpr(S.getInit()); 5649 5650 // Emit the iterations count variable. 5651 // If it is not a variable, Sema decided to calculate iterations count on 5652 // each iteration (e.g., it is foldable into a constant). 5653 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 5654 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 5655 // Emit calculation of the iterations count. 5656 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 5657 } 5658 5659 { 5660 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 5661 emitCommonSimdLoop( 5662 CGF, S, 5663 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5664 if (isOpenMPSimdDirective(S.getDirectiveKind())) 5665 CGF.EmitOMPSimdInit(S); 5666 }, 5667 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 5668 CGF.EmitOMPInnerLoop( 5669 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 5670 [&S](CodeGenFunction &CGF) { 5671 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest()); 5672 CGF.EmitStopPoint(&S); 5673 }, 5674 [](CodeGenFunction &) {}); 5675 }); 5676 } 5677 // Emit: if (PreCond) - end. 5678 if (ContBlock) { 5679 CGF.EmitBranch(ContBlock); 5680 CGF.EmitBlock(ContBlock, true); 5681 } 5682 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
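    // e.g. for 'lastprivate(x)' on a taskloop, the private 'x' is copied back
    // only when the runtime-set last-iteration flag '*LIP' reads nonzero.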
5683 if (HasLastprivateClause) { 5684 CGF.EmitOMPLastprivateClauseFinal( 5685 S, isOpenMPSimdDirective(S.getDirectiveKind()), 5686 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 5687 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 5688 (*LIP)->getType(), S.getBeginLoc()))); 5689 } 5690 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 5691 return CGF.Builder.CreateIsNotNull( 5692 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 5693 (*LIP)->getType(), S.getBeginLoc())); 5694 }); 5695 }; 5696 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 5697 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 5698 const OMPTaskDataTy &Data) { 5699 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 5700 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 5701 OMPLoopScope PreInitScope(CGF, S); 5702 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 5703 OutlinedFn, SharedsTy, 5704 CapturedStruct, IfCond, Data); 5705 }; 5706 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 5707 CodeGen); 5708 }; 5709 if (Data.Nogroup) { 5710 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 5711 } else { 5712 CGM.getOpenMPRuntime().emitTaskgroupRegion( 5713 *this, 5714 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 5715 PrePostActionTy &Action) { 5716 Action.Enter(CGF); 5717 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 5718 Data); 5719 }, 5720 S.getBeginLoc()); 5721 } 5722 } 5723 5724 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 5725 auto LPCRegion = 5726 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5727 EmitOMPTaskLoopBasedDirective(S); 5728 } 5729 5730 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 5731 const OMPTaskLoopSimdDirective &S) { 5732 auto LPCRegion = 5733 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5734 OMPLexicalScope Scope(*this, S); 5735 EmitOMPTaskLoopBasedDirective(S); 5736 } 5737 5738 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 5739 const OMPMasterTaskLoopDirective &S) { 5740 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5741 Action.Enter(CGF); 5742 EmitOMPTaskLoopBasedDirective(S); 5743 }; 5744 auto LPCRegion = 5745 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5746 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 5747 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 5748 } 5749 5750 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 5751 const OMPMasterTaskLoopSimdDirective &S) { 5752 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5753 Action.Enter(CGF); 5754 EmitOMPTaskLoopBasedDirective(S); 5755 }; 5756 auto LPCRegion = 5757 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5758 OMPLexicalScope Scope(*this, S); 5759 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 5760 } 5761 5762 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 5763 const OMPParallelMasterTaskLoopDirective &S) { 5764 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5765 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 5766 PrePostActionTy &Action) { 5767 Action.Enter(CGF); 5768 CGF.EmitOMPTaskLoopBasedDirective(S); 5769 }; 5770 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 5771 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 5772 S.getBeginLoc()); 
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
    const OMPParallelMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

// Emit the associated statement of a directive as an inlined region,
// privatizing any captured globals. Simd-like directives are still lowered
// through emitOMPSimdRegion.
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
    const OMPExecutableDirective &D) {
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
    OMPPrivateScope GlobalsScope(CGF);
    if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
      // Capture global firstprivates to avoid crash.
      for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
        for (const Expr *Ref : C->varlists()) {
          // Use dyn_cast here: the reference may not be a plain DeclRefExpr,
          // and cast<> would assert instead of letting the null check below
          // fire.
          const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
          if (!DRE)
            continue;
          const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
          if (!VD || VD->hasLocalStorage())
            continue;
          if (!CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(Ref);
            GlobalsScope.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
        }
      }
    }
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      (void)GlobalsScope.Privatize();
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            GlobalsScope.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getCollapsedNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in
              // clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      (void)GlobalsScope.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
    OMPSimdLexicalScope Scope(*this, D);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        *this,
        isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                    : D.getDirectiveKind(),
        CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, D);
}
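
// Example (for exposition only; 'GlobalN' and 'Body' are placeholder names):
// a tasking simd construct such as
//
//   int GlobalN = 128;
//   #pragma omp taskloop simd firstprivate(GlobalN)
//   for (int I = 0; I < GlobalN; ++I)
//     Body(I);
//
// taken through EmitSimpleOMPExecutableDirective first captures the global
// GlobalN into GlobalsScope (the "capture global firstprivates" step above,
// since taskloop simd is a tasking directive), and then, because it is also a
// simd directive, lowers the loop through emitOMPSimdRegion rather than
// emitting the captured body verbatim.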