//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};
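
// Illustrative example of the pre-init handling above: for a directive such
// as
//   #pragma omp parallel num_threads(N + 1)
// Sema captures the expression 'N + 1' into a pre-init declaration attached
// to the clause, and emitPreInitStmt() emits that declaration before the
// region so the clause codegen can refer to it.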

/// Lexical scope for OpenMP parallel construct that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                          CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit init, __range and __end variables for C++ range loops.
    const Stmt *Body =
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
          Body, /*TryImperfectlyNestedLoops=*/true);
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
        if (const Stmt *Init = CXXFor->getInit())
          CGF.EmitStmt(Init);
        CGF.EmitStmt(CXXFor->getRangeStmt());
        CGF.EmitStmt(CXXFor->getEndStmt());
        Body = CXXFor->getBody();
      }
    }
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace
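
// Illustrative example for OMPLoopScope above: given
//   #pragma omp for collapse(2)
//   for (int I = 0; I < N; ++I)
//     for (int X : Vec)
//       Body(I, X);
// the implicit __range and __end variables (and the init statement, if any)
// of the inner range-based loop are emitted up front, because the collapsed
// iteration space must be computed before the loops themselves are emitted.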

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}
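
// For example, for a VLA declared as 'double A[N][M]', getTypeSize() above
// computes N * M * sizeof(double) as a chain of nuw multiplies, since
// getTypeSizeInChars() returns zero for variably-modified types.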

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace
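
// Illustrative example of the uintptr convention used below: for
//   int N = 10;
//   #pragma omp parallel firstprivate(N)
// 'N' is captured by copy, so the outlined function receives it as a
// uintptr-sized argument (the runtime library only deals with pointer-width
// values), and emitOutlinedFunctionPrologue() casts it back to the original
// type. VLA sizes are passed the same way.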

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}
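
// When debug info is enabled, GenerateOpenMPCapturedStmtFunction() below
// emits a "<helper name>_debug__" function that keeps the original parameter
// types for better debugging, plus a thin wrapper with the uintptr-based
// signature expected by the runtime that loads its arguments and forwards
// them to the debug version.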

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
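
// The IR emitted by EmitOMPAggregateAssign() above has roughly this shape:
//   entry:              br (DestBegin == DestEnd), done, body
//   omp.arraycpy.body:  phis for the src/dest element pointers, one element
//                       copy, advance both pointers, test against DestEnd
//   omp.arraycpy.done: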

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}
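
// Illustrative example: for
//   int A[10];
//   #pragma omp parallel firstprivate(A)
// EmitOMPFirstprivateClause() below allocates a private copy of 'A' and
// initializes it from the original array: a simple aggregate copy for
// trivial initializers, or element-by-element construction otherwise.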

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}
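
// Illustrative example: for
//   int X;
//   #pragma omp threadprivate(X)
//   #pragma omp parallel copyin(X)
// EmitOMPCopyinClause() below compares the addresses of the master's and the
// current thread's copies of 'X' and performs the copy only in threads whose
// copy is distinct from the master's; the caller then emits the implicit
// barrier.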

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization, it is done in
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                               OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
                                                                        OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
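
// Illustrative example: for
//   #pragma omp for lastprivate(X)
// EmitOMPLastprivateClauseInit() above registers a private copy of 'X', and
// EmitOMPLastprivateClauseFinal() below copies that private value back to the
// original variable, guarded by the is-last-iteration flag when one is
// provided.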

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const Expr *Ref : C->varlists()) {
      Shareds.emplace_back(Ref);
      Privates.emplace_back(*IPriv);
      ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto ILHS = LHSs.begin();
  auto IRHS = RHSs.begin();
  auto IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
            return IsArray
                       ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
          });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}
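
// Illustrative example: for
//   #pragma omp parallel for reduction(+ : Sum)
// EmitOMPReductionClauseInit() above creates a private 'Sum' initialized to
// the operation's identity (0 for '+'), and EmitOMPReductionClauseFinal()
// asks the runtime to combine the per-thread copies into the original
// variable.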

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
/// 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  if (CGF.getLangOpts().OpenMP < 50)
    return;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  // Privates need not be analyzed since they are not captured at all.
  // Task reductions may be skipped - tasks are ignored.
  // Firstprivates do not return value but may be passed by reference - no need
  // to check for updated lastprivate conditional.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    }
  }
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
      CGF, S, PrivateDecls);
}

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
  // lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}
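
// EmitOMPParallelDirective() below has two code paths: if the experimental
// OpenMPIRBuilder is in use (e.g. under the -fopenmp-enable-irbuilder cc1
// flag), the parallel region is built through llvm::OpenMPIRBuilder
// callbacks; otherwise the region is outlined via
// emitCommonOMPParallelDirective() above.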
    llvm::Value *IfCond = nullptr;
    if (const auto *C = S.getSingleClause<OMPIfClause>())
      IfCond = EmitScalarExpr(C->getCondition(),
                              /*IgnoreResultAssign=*/true);

    llvm::Value *NumThreads = nullptr;
    if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
      NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                  /*IgnoreResultAssign=*/true);

    ProcBindKind ProcBind = OMP_PROC_BIND_default;
    if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
      ProcBind = ProcBindClause->getProcBindKind();

    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    // The cleanup callback that finalizes all variables at the given location,
    // thus calls destructors etc.
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    // Privatization callback that performs appropriate action for
    // shared/private/firstprivate/lastprivate/copyin/... variables.
    //
    // TODO: This defaults to shared right now.
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                     llvm::Value &Val, llvm::Value *&ReplVal) {
      // The next line is appropriate only for variables (Val) with the
      // data-sharing attribute "shared".
      ReplVal = &Val;

      return CodeGenIP;
    };

    const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
    const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();

    auto BodyGenCB = [ParallelRegionBodyStmt,
                      this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                            llvm::BasicBlock &ContinuationBB) {
      OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
                                                      ContinuationBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
                                             CodeGenIP, ContinuationBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
                                                 FiniCB, IfCond, NumThreads,
                                                 ProcBind, S.hasCancel()));
    return;
  }

  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // propagation of the master thread's values of threadprivate variables
      // to the local instances of those variables in all other implicit
      // threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
                     int MaxLevel, int Level = 0) {
  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
  const Stmt *SimplifiedS = S->IgnoreContainers();
  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
    PrettyStackTraceLoc CrashInfo(
        CGF.getContext().getSourceManager(), CS->getLBracLoc(),
        "LLVM IR generation of compound statement ('{}')");

    // Keep track of the current cleanup stack depth, including debug scopes.
    CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
    for (const Stmt *CurStmt : CS->body())
      emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
    return;
  }
  if (SimplifiedS == NextLoop) {
    if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
      S = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(SimplifiedS) &&
             "Expected canonical for loop or range-based for loop.");
      const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
      CGF.EmitStmt(CXXFor->getLoopVarStmt());
      S = CXXFor->getBody();
    }
    if (Level + 1 < MaxLevel) {
      NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
          S, /*TryImperfectlyNestedLoops=*/true);
      emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
      return;
    }
  }
  CGF.EmitStmt(S);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counter values on the current iteration.
  for (const Expr *UE : D.updates())
    EmitIgnoredExpr(UE);
  // Update the linear variables.
  // In distribute directives only loop counters may be marked as linear, so
  // there is no need to generate the code for them.
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
      for (const Expr *UE : C->updates())
        EmitIgnoredExpr(UE);
    }
  }

  // On a continue in the body, jump to the end.
  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  for (const Expr *E : D.finals_conditions()) {
    if (!E)
      continue;
    // Check that loop counter in non-rectangular nest fits into the iteration
    // space.
    llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
    EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
                         getProfileCount(D.getBody()));
    EmitBlock(NextBB);
  }
  // Emit loop variables for C++ range loops.
  const Stmt *Body =
      D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
  // Emit loop body.
  emitBody(*this, Body,
           OMPLoopDirective::tryToFindNextInnerLoop(
               Body, /*TryImperfectlyNestedLoops=*/true),
           D.getCollapsedNumber());

  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
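  // The generated control flow, using the block names created below, is
  // roughly:
  //   omp.inner.for.cond: if (LoopCond) goto body; else goto end (or cleanup)
  //   omp.inner.for.body: BODY
  //   omp.inner.for.inc:  IV = IV + 1; goto cond
  //   omp.inner.for.end: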
1608 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1609 EmitBlock(CondBlock); 1610 const SourceRange R = S.getSourceRange(); 1611 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1612 SourceLocToDebugLoc(R.getEnd())); 1613 1614 // If there are any cleanups between here and the loop-exit scope, 1615 // create a block to stage a loop exit along. 1616 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1617 if (RequiresCleanup) 1618 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1619 1620 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1621 1622 // Emit condition. 1623 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1624 if (ExitBlock != LoopExit.getBlock()) { 1625 EmitBlock(ExitBlock); 1626 EmitBranchThroughCleanup(LoopExit); 1627 } 1628 1629 EmitBlock(LoopBody); 1630 incrementProfileCounter(&S); 1631 1632 // Create a block for the increment. 1633 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1634 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1635 1636 BodyGen(*this); 1637 1638 // Emit "IV = IV + 1" and a back-edge to the condition block. 1639 EmitBlock(Continue.getBlock()); 1640 EmitIgnoredExpr(IncExpr); 1641 PostIncGen(*this); 1642 BreakContinueStack.pop_back(); 1643 EmitBranch(CondBlock); 1644 LoopStack.pop(); 1645 // Emit the fall-through block. 1646 EmitBlock(LoopExit.getBlock()); 1647 } 1648 1649 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1650 if (!HaveInsertPoint()) 1651 return false; 1652 // Emit inits for the linear variables. 1653 bool HasLinears = false; 1654 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1655 for (const Expr *Init : C->inits()) { 1656 HasLinears = true; 1657 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1658 if (const auto *Ref = 1659 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1660 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1661 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1662 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1663 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1664 VD->getInit()->getType(), VK_LValue, 1665 VD->getInit()->getExprLoc()); 1666 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1667 VD->getType()), 1668 /*capturedByInit=*/false); 1669 EmitAutoVarCleanups(Emission); 1670 } else { 1671 EmitVarDecl(*VD); 1672 } 1673 } 1674 // Emit the linear steps for the linear clauses. 1675 // If a step is not constant, it is pre-calculated before the loop. 1676 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1677 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1678 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1679 // Emit calculation of the linear step. 1680 EmitIgnoredExpr(CS); 1681 } 1682 } 1683 return HasLinears; 1684 } 1685 1686 void CodeGenFunction::EmitOMPLinearClauseFinal( 1687 const OMPLoopDirective &D, 1688 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1689 if (!HaveInsertPoint()) 1690 return; 1691 llvm::BasicBlock *DoneBB = nullptr; 1692 // Emit the final values of the linear variables. 1693 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1694 auto IC = C->varlist_begin(); 1695 for (const Expr *F : C->finals()) { 1696 if (!DoneBB) { 1697 if (llvm::Value *Cond = CondGen(*this)) { 1698 // If the first post-update expression is found, emit conditional 1699 // block if it was requested. 
1700 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1701 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1702 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1703 EmitBlock(ThenBB); 1704 } 1705 } 1706 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1707 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1708 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1709 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1710 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1711 CodeGenFunction::OMPPrivateScope VarScope(*this); 1712 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1713 (void)VarScope.Privatize(); 1714 EmitIgnoredExpr(F); 1715 ++IC; 1716 } 1717 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1718 EmitIgnoredExpr(PostUpdate); 1719 } 1720 if (DoneBB) 1721 EmitBlock(DoneBB, /*IsFinished=*/true); 1722 } 1723 1724 static void emitAlignedClause(CodeGenFunction &CGF, 1725 const OMPExecutableDirective &D) { 1726 if (!CGF.HaveInsertPoint()) 1727 return; 1728 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1729 llvm::APInt ClauseAlignment(64, 0); 1730 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1731 auto *AlignmentCI = 1732 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1733 ClauseAlignment = AlignmentCI->getValue(); 1734 } 1735 for (const Expr *E : Clause->varlists()) { 1736 llvm::APInt Alignment(ClauseAlignment); 1737 if (Alignment == 0) { 1738 // OpenMP [2.8.1, Description] 1739 // If no optional parameter is specified, implementation-defined default 1740 // alignments for SIMD instructions on the target platforms are assumed. 1741 Alignment = 1742 CGF.getContext() 1743 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1744 E->getType()->getPointeeType())) 1745 .getQuantity(); 1746 } 1747 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1748 "alignment is not power of 2"); 1749 if (Alignment != 0) { 1750 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1751 CGF.emitAlignmentAssumption( 1752 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1753 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1754 } 1755 } 1756 } 1757 } 1758 1759 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1760 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1761 if (!HaveInsertPoint()) 1762 return; 1763 auto I = S.private_counters().begin(); 1764 for (const Expr *E : S.counters()) { 1765 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1766 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1767 // Emit var without initialization. 1768 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1769 EmitAutoVarCleanups(VarEmission); 1770 LocalDeclMap.erase(PrivateVD); 1771 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1772 return VarEmission.getAllocatedAddress(); 1773 }); 1774 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1775 VD->hasGlobalStorage()) { 1776 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1777 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1778 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1779 E->getType(), VK_LValue, E->getExprLoc()); 1780 return EmitLValue(&DRE).getAddress(*this); 1781 }); 1782 } else { 1783 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1784 return VarEmission.getAllocatedAddress(); 1785 }); 1786 } 1787 ++I; 1788 } 1789 // Privatize extra loop counters used in loops for ordered(n) clauses. 
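  // E.g. for '#pragma omp for ordered(2)' associated with two nested loops
  // only the first loop is collapsed, so the counter of the second loop is
  // privatized here if it is captured from an enclosing scope.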
  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
    if (!C->getNumForLoops())
      continue;
    for (unsigned I = S.getCollapsedNumber(),
                  E = C->getLoopNumIterations().size();
         I < E; ++I) {
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      // Override only those variables that can be captured to avoid re-emission
      // of the variables declared within the loops.
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
          return CreateMemTemp(DRE->getType(), VD->getName());
        });
      }
    }
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (const Expr *I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Create temp loop control variables with their init values to support
  // non-rectangular loops.
  CodeGenFunction::OMPMapVars PreCondVars;
  for (const Expr *E : S.dependent_counters()) {
    if (!E)
      continue;
    assert(!E->getType().getNonReferenceType()->isRecordType() &&
           "dependent counter must not be an iterator.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Address CounterAddr =
        CGF.CreateMemTemp(VD->getType().getNonReferenceType());
    (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
  }
  (void)PreCondVars.apply(CGF);
  for (const Expr *E : S.dependent_inits()) {
    if (!E)
      continue;
    CGF.EmitIgnoredExpr(E);
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
  PreCondVars.restore(CGF);
}

void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (const Expr *E : C->varlists()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
          // Emit private VarDecl with copy init.
          EmitVarDecl(*PrivateVD);
          return GetAddrOfLocalVar(PrivateVD);
        });
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else {
        EmitVarDecl(*PrivateVD);
      }
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(/*Enable=*/false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/simdlen and order clauses.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable();
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
  if (const auto *C = D.getSingleClause<OMPOrderClause>())
    if (C->getKind() == OMPC_ORDER_concurrent)
      LoopStack.setParallel(/*Enable=*/true);
}

void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
1938 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 1939 DoneBB = createBasicBlock(".omp.final.done"); 1940 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1941 EmitBlock(ThenBB); 1942 } 1943 } 1944 Address OrigAddr = Address::invalid(); 1945 if (CED) { 1946 OrigAddr = 1947 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 1948 } else { 1949 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 1950 /*RefersToEnclosingVariableOrCapture=*/false, 1951 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 1952 OrigAddr = EmitLValue(&DRE).getAddress(*this); 1953 } 1954 OMPPrivateScope VarScope(*this); 1955 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1956 (void)VarScope.Privatize(); 1957 EmitIgnoredExpr(F); 1958 } 1959 ++IC; 1960 ++IPC; 1961 } 1962 if (DoneBB) 1963 EmitBlock(DoneBB, /*IsFinished=*/true); 1964 } 1965 1966 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 1967 const OMPLoopDirective &S, 1968 CodeGenFunction::JumpDest LoopExit) { 1969 CGF.EmitOMPLoopBody(S, LoopExit); 1970 CGF.EmitStopPoint(&S); 1971 } 1972 1973 /// Emit a helper variable and return corresponding lvalue. 1974 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 1975 const DeclRefExpr *Helper) { 1976 auto VDecl = cast<VarDecl>(Helper->getDecl()); 1977 CGF.EmitVarDecl(*VDecl); 1978 return CGF.EmitLValue(Helper); 1979 } 1980 1981 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 1982 const RegionCodeGenTy &SimdInitGen, 1983 const RegionCodeGenTy &BodyCodeGen) { 1984 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 1985 PrePostActionTy &) { 1986 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 1987 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 1988 SimdInitGen(CGF); 1989 1990 BodyCodeGen(CGF); 1991 }; 1992 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 1993 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 1994 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 1995 1996 BodyCodeGen(CGF); 1997 }; 1998 const Expr *IfCond = nullptr; 1999 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2000 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2001 if (CGF.getLangOpts().OpenMP >= 50 && 2002 (C->getNameModifier() == OMPD_unknown || 2003 C->getNameModifier() == OMPD_simd)) { 2004 IfCond = C->getCondition(); 2005 break; 2006 } 2007 } 2008 } 2009 if (IfCond) { 2010 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2011 } else { 2012 RegionCodeGenTy ThenRCG(ThenGen); 2013 ThenRCG(CGF); 2014 } 2015 } 2016 2017 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2018 PrePostActionTy &Action) { 2019 Action.Enter(CGF); 2020 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2021 "Expected simd directive"); 2022 OMPLoopScope PreInitScope(CGF, S); 2023 // if (PreCond) { 2024 // for (IV in 0..LastIteration) BODY; 2025 // <Final counter/linear vars updates>; 2026 // } 2027 // 2028 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2029 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2030 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2031 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2032 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2033 } 2034 2035 // Emit: if (PreCond) - begin. 2036 // If the condition constant folds and can be elided, avoid emitting the 2037 // whole loop. 
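  // E.g. for a loop like 'for (int i = 0; i < 0; ++i)' the precondition
  // constant-folds to false and no simd region is emitted at all.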
2038 bool CondConstant; 2039 llvm::BasicBlock *ContBlock = nullptr; 2040 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2041 if (!CondConstant) 2042 return; 2043 } else { 2044 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2045 ContBlock = CGF.createBasicBlock("simd.if.end"); 2046 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2047 CGF.getProfileCount(&S)); 2048 CGF.EmitBlock(ThenBlock); 2049 CGF.incrementProfileCounter(&S); 2050 } 2051 2052 // Emit the loop iteration variable. 2053 const Expr *IVExpr = S.getIterationVariable(); 2054 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2055 CGF.EmitVarDecl(*IVDecl); 2056 CGF.EmitIgnoredExpr(S.getInit()); 2057 2058 // Emit the iterations count variable. 2059 // If it is not a variable, Sema decided to calculate iterations count on 2060 // each iteration (e.g., it is foldable into a constant). 2061 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2062 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2063 // Emit calculation of the iterations count. 2064 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2065 } 2066 2067 emitAlignedClause(CGF, S); 2068 (void)CGF.EmitOMPLinearClauseInit(S); 2069 { 2070 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2071 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2072 CGF.EmitOMPLinearClause(S, LoopScope); 2073 CGF.EmitOMPPrivateClause(S, LoopScope); 2074 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2075 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2076 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2077 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2078 (void)LoopScope.Privatize(); 2079 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2080 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2081 2082 emitCommonSimdLoop( 2083 CGF, S, 2084 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2085 CGF.EmitOMPSimdInit(S); 2086 }, 2087 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2088 CGF.EmitOMPInnerLoop( 2089 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2090 [&S](CodeGenFunction &CGF) { 2091 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest()); 2092 CGF.EmitStopPoint(&S); 2093 }, 2094 [](CodeGenFunction &) {}); 2095 }); 2096 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2097 // Emit final copy of the lastprivate variables at the end of loops. 2098 if (HasLastprivateClause) 2099 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2100 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2101 emitPostUpdateForReductionClause(CGF, S, 2102 [](CodeGenFunction &) { return nullptr; }); 2103 } 2104 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2105 // Emit: if (PreCond) - end. 2106 if (ContBlock) { 2107 CGF.EmitBranch(ContBlock); 2108 CGF.EmitBlock(ContBlock, true); 2109 } 2110 } 2111 2112 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2113 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2114 emitOMPSimdRegion(CGF, S, Action); 2115 }; 2116 { 2117 auto LPCRegion = 2118 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2119 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2120 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2121 } 2122 // Check for outer lastprivate conditional update. 
2123 checkForLastprivateConditionalUpdate(*this, S); 2124 } 2125 2126 void CodeGenFunction::EmitOMPOuterLoop( 2127 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2128 CodeGenFunction::OMPPrivateScope &LoopScope, 2129 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2130 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2131 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2132 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2133 2134 const Expr *IVExpr = S.getIterationVariable(); 2135 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2136 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2137 2138 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2139 2140 // Start the loop with a block that tests the condition. 2141 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2142 EmitBlock(CondBlock); 2143 const SourceRange R = S.getSourceRange(); 2144 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2145 SourceLocToDebugLoc(R.getEnd())); 2146 2147 llvm::Value *BoolCondVal = nullptr; 2148 if (!DynamicOrOrdered) { 2149 // UB = min(UB, GlobalUB) or 2150 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2151 // 'distribute parallel for') 2152 EmitIgnoredExpr(LoopArgs.EUB); 2153 // IV = LB 2154 EmitIgnoredExpr(LoopArgs.Init); 2155 // IV < UB 2156 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2157 } else { 2158 BoolCondVal = 2159 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2160 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2161 } 2162 2163 // If there are any cleanups between here and the loop-exit scope, 2164 // create a block to stage a loop exit along. 2165 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2166 if (LoopScope.requiresCleanups()) 2167 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2168 2169 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2170 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2171 if (ExitBlock != LoopExit.getBlock()) { 2172 EmitBlock(ExitBlock); 2173 EmitBranchThroughCleanup(LoopExit); 2174 } 2175 EmitBlock(LoopBody); 2176 2177 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2178 // LB for loop condition and emitted it above). 2179 if (DynamicOrOrdered) 2180 EmitIgnoredExpr(LoopArgs.Init); 2181 2182 // Create a block for the increment. 2183 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2184 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2185 2186 emitCommonSimdLoop( 2187 *this, S, 2188 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2189 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2190 // with dynamic/guided scheduling and without ordered clause. 2191 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2192 CGF.LoopStack.setParallel(!IsMonotonic); 2193 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2194 if (C->getKind() == OMPC_ORDER_concurrent) 2195 CGF.LoopStack.setParallel(/*Enable=*/true); 2196 } else { 2197 CGF.EmitOMPSimdInit(S, IsMonotonic); 2198 } 2199 }, 2200 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2201 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2202 SourceLocation Loc = S.getBeginLoc(); 2203 // when 'distribute' is not combined with a 'for': 2204 // while (idx <= UB) { BODY; ++idx; } 2205 // when 'distribute' is combined with a 'for' 2206 // (e.g. 
'distribute parallel for') 2207 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2208 CGF.EmitOMPInnerLoop( 2209 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2210 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2211 CodeGenLoop(CGF, S, LoopExit); 2212 }, 2213 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2214 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2215 }); 2216 }); 2217 2218 EmitBlock(Continue.getBlock()); 2219 BreakContinueStack.pop_back(); 2220 if (!DynamicOrOrdered) { 2221 // Emit "LB = LB + Stride", "UB = UB + Stride". 2222 EmitIgnoredExpr(LoopArgs.NextLB); 2223 EmitIgnoredExpr(LoopArgs.NextUB); 2224 } 2225 2226 EmitBranch(CondBlock); 2227 LoopStack.pop(); 2228 // Emit the fall-through block. 2229 EmitBlock(LoopExit.getBlock()); 2230 2231 // Tell the runtime we are done. 2232 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2233 if (!DynamicOrOrdered) 2234 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2235 S.getDirectiveKind()); 2236 }; 2237 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2238 } 2239 2240 void CodeGenFunction::EmitOMPForOuterLoop( 2241 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2242 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2243 const OMPLoopArguments &LoopArgs, 2244 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2245 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2246 2247 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2248 const bool DynamicOrOrdered = 2249 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2250 2251 assert((Ordered || 2252 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2253 LoopArgs.Chunk != nullptr)) && 2254 "static non-chunked schedule does not need outer loop"); 2255 2256 // Emit outer loop. 2257 // 2258 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2259 // When schedule(dynamic,chunk_size) is specified, the iterations are 2260 // distributed to threads in the team in chunks as the threads request them. 2261 // Each thread executes a chunk of iterations, then requests another chunk, 2262 // until no chunks remain to be distributed. Each chunk contains chunk_size 2263 // iterations, except for the last chunk to be distributed, which may have 2264 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2265 // 2266 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2267 // to threads in the team in chunks as the executing threads request them. 2268 // Each thread executes a chunk of iterations, then requests another chunk, 2269 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2270 // each chunk is proportional to the number of unassigned iterations divided 2271 // by the number of threads in the team, decreasing to 1. For a chunk_size 2272 // with value k (greater than 1), the size of each chunk is determined in the 2273 // same way, with the restriction that the chunks do not contain fewer than k 2274 // iterations (except for the last chunk to be assigned, which may have fewer 2275 // than k iterations). 2276 // 2277 // When schedule(auto) is specified, the decision regarding scheduling is 2278 // delegated to the compiler and/or runtime system. The programmer gives the 2279 // implementation the freedom to choose any possible mapping of iterations to 2280 // threads in the team. 
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from the
  // run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided into
  // chunks of size chunk_size, and the chunks are assigned to the threads in
  // the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
    CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
                                                              LoopArgs.Chunk};
    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
                           IVSigned, Ordered, DispatchRTInputValues);
  } else {
    CGOpenMPRuntime::StaticRTInput StaticInit(
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);
    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                         ScheduleKind, StaticInit);
  }

  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
                                    const unsigned IVSize,
                                    const bool IVSigned) {
    if (Ordered) {
      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
                                                            IVSigned);
    }
  };

  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}

static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}

void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
    const CodeGenLoopTy &CodeGenLoopContent) {

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Emit outer loop.
  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
  // dynamic.
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  CGOpenMPRuntime::StaticRTInput StaticInit(
      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);

  // For combined 'distribute' and 'for' the increment expression of distribute
  // is stored in DistInc. For 'distribute' alone, it is in Inc.
  Expr *IncExpr;
  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
    IncExpr = S.getDistInc();
  else
    IncExpr = S.getInc();

  // This routine is shared by 'omp distribute parallel for' and
  // 'omp distribute': select the right EUB expression depending on the
  // directive.
  OMPLoopArguments OuterLoopArgs;
  OuterLoopArgs.LB = LoopArgs.LB;
  OuterLoopArgs.UB = LoopArgs.UB;
  OuterLoopArgs.ST = LoopArgs.ST;
  OuterLoopArgs.IL = LoopArgs.IL;
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedEnsureUpperBound()
                          : S.getEnsureUpperBound();
  OuterLoopArgs.IncExpr = IncExpr;
  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedInit()
                           : S.getInit();
  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedCond()
                           : S.getCond();
  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextLowerBound()
                             : S.getNextLowerBound();
  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextUpperBound()
                             : S.getNextUpperBound();

  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
                   emitEmptyOrdered);
}

static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));

  // When composing 'distribute' with 'for' (e.g. as in 'distribute
  // parallel for') we need to use the 'distribute'
  // chunk lower and upper bounds rather than the whole loop iteration
  // space. These are parameters to the outlined function for 'parallel'
  // and we copy the bounds of the previous schedule into the current ones.
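  // E.g. in 'distribute parallel for' each inner 'parallel for' works on the
  // [PrevLB, PrevUB] chunk assigned to its team by 'distribute' rather than
  // on the whole [0, LastIteration] space.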
  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
      PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
  PrevLBVal = CGF.EmitScalarConversion(
      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevLowerBoundVariable()->getExprLoc());
  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
      PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
  PrevUBVal = CGF.EmitScalarConversion(
      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevUpperBoundVariable()->getExprLoc());

  CGF.EmitStoreOfScalar(PrevLBVal, LB);
  CGF.EmitStoreOfScalar(PrevUBVal, UB);

  return {LB, UB};
}

/// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
/// we need to use the LB and UB expressions generated by the worksharing
/// code generation support, whereas in non-combined situations we would
/// just emit 0 and the LastIteration expression.
/// This function is necessary due to the difference in the LB and UB
/// types for the RT emission routines for 'for_static_init' and
/// 'for_dispatch_init'.
static std::pair<llvm::Value *, llvm::Value *>
emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        Address LB, Address UB) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  // When implementing a dynamic schedule for a 'for' combined with a
  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
  // is not normalized as each team only executes its own assigned
  // distribute chunk.
  QualType IteratorTy = IVExpr->getType();
  llvm::Value *LBVal =
      CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  llvm::Value *UBVal =
      CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  return {LBVal, UBVal};
}

static void emitDistributeParallelForDistributeInnerBoundParams(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const auto &Dir = cast<OMPLoopDirective>(S);
  LValue LB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
  llvm::Value *LBCast =
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
                                CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(LBCast);
  LValue UB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));

  llvm::Value *UBCast =
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
                                CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(UBCast);
}

static void
emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
                                 const OMPLoopDirective &S,
                                 CodeGenFunction::JumpDest LoopExit) {
  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
                                         PrePostActionTy &Action) {
    Action.Enter(CGF);
    bool HasCancel = false;
    if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
      if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D =
                   dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
    }
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
                                                     HasCancel);
    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
                               emitDistributeParallelForInnerBounds,
                               emitDistributeParallelForDispatchBounds);
  };

  emitCommonOMPParallelDirective(
      CGF, S,
      isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
      CGInlinedWorksharingLoop,
      emitDistributeParallelForDistributeInnerBoundParams);
}

void CodeGenFunction::EmitOMPDistributeParallelForDirective(
    const OMPDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
    const OMPDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeSimdDirective(
    const OMPDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
  // Emit SPMD target simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetSimdDirective(
    const OMPTargetSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

bool CodeGenFunction::EmitOMPWorksharingLoop(
    const OMPLoopDirective &S, Expr *EUB,
    const CodeGenLoopBoundsTy &CodeGenLoopBounds,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  // Emit the loop iteration variable.
2589 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2590 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2591 EmitVarDecl(*IVDecl); 2592 2593 // Emit the iterations count variable. 2594 // If it is not a variable, Sema decided to calculate iterations count on each 2595 // iteration (e.g., it is foldable into a constant). 2596 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2597 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2598 // Emit calculation of the iterations count. 2599 EmitIgnoredExpr(S.getCalcLastIteration()); 2600 } 2601 2602 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2603 2604 bool HasLastprivateClause; 2605 // Check pre-condition. 2606 { 2607 OMPLoopScope PreInitScope(*this, S); 2608 // Skip the entire loop if we don't meet the precondition. 2609 // If the condition constant folds and can be elided, avoid emitting the 2610 // whole loop. 2611 bool CondConstant; 2612 llvm::BasicBlock *ContBlock = nullptr; 2613 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2614 if (!CondConstant) 2615 return false; 2616 } else { 2617 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2618 ContBlock = createBasicBlock("omp.precond.end"); 2619 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2620 getProfileCount(&S)); 2621 EmitBlock(ThenBlock); 2622 incrementProfileCounter(&S); 2623 } 2624 2625 RunCleanupsScope DoacrossCleanupScope(*this); 2626 bool Ordered = false; 2627 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2628 if (OrderedClause->getNumForLoops()) 2629 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2630 else 2631 Ordered = true; 2632 } 2633 2634 llvm::DenseSet<const Expr *> EmittedFinals; 2635 emitAlignedClause(*this, S); 2636 bool HasLinears = EmitOMPLinearClauseInit(S); 2637 // Emit helper vars inits. 2638 2639 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2640 LValue LB = Bounds.first; 2641 LValue UB = Bounds.second; 2642 LValue ST = 2643 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2644 LValue IL = 2645 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2646 2647 // Emit 'then' code. 2648 { 2649 OMPPrivateScope LoopScope(*this); 2650 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2651 // Emit implicit barrier to synchronize threads and avoid data races on 2652 // initialization of firstprivate variables and post-update of 2653 // lastprivate variables. 2654 CGM.getOpenMPRuntime().emitBarrierCall( 2655 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2656 /*ForceSimpleCall=*/true); 2657 } 2658 EmitOMPPrivateClause(S, LoopScope); 2659 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2660 *this, S, EmitLValue(S.getIterationVariable())); 2661 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2662 EmitOMPReductionClauseInit(S, LoopScope); 2663 EmitOMPPrivateLoopCounters(S, LoopScope); 2664 EmitOMPLinearClause(S, LoopScope); 2665 (void)LoopScope.Privatize(); 2666 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2667 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2668 2669 // Detect the loop schedule kind and chunk. 
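      // E.g. '#pragma omp for schedule(monotonic: dynamic, 4)' yields
      // Schedule == dynamic with modifier M1 == monotonic and a chunk
      // expression of 4.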
2670 const Expr *ChunkExpr = nullptr; 2671 OpenMPScheduleTy ScheduleKind; 2672 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 2673 ScheduleKind.Schedule = C->getScheduleKind(); 2674 ScheduleKind.M1 = C->getFirstScheduleModifier(); 2675 ScheduleKind.M2 = C->getSecondScheduleModifier(); 2676 ChunkExpr = C->getChunkSize(); 2677 } else { 2678 // Default behaviour for schedule clause. 2679 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 2680 *this, S, ScheduleKind.Schedule, ChunkExpr); 2681 } 2682 bool HasChunkSizeOne = false; 2683 llvm::Value *Chunk = nullptr; 2684 if (ChunkExpr) { 2685 Chunk = EmitScalarExpr(ChunkExpr); 2686 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 2687 S.getIterationVariable()->getType(), 2688 S.getBeginLoc()); 2689 Expr::EvalResult Result; 2690 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 2691 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 2692 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 2693 } 2694 } 2695 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2696 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2697 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 2698 // If the static schedule kind is specified or if the ordered clause is 2699 // specified, and if no monotonic modifier is specified, the effect will 2700 // be as if the monotonic modifier was specified. 2701 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 2702 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 2703 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 2704 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 2705 /* Chunked */ Chunk != nullptr) || 2706 StaticChunkedOne) && 2707 !Ordered) { 2708 JumpDest LoopExit = 2709 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 2710 emitCommonSimdLoop( 2711 *this, S, 2712 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2713 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2714 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 2715 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 2716 if (C->getKind() == OMPC_ORDER_concurrent) 2717 CGF.LoopStack.setParallel(/*Enable=*/true); 2718 } 2719 }, 2720 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 2721 &S, ScheduleKind, LoopExit, 2722 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2723 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2724 // When no chunk_size is specified, the iteration space is divided 2725 // into chunks that are approximately equal in size, and at most 2726 // one chunk is distributed to each thread. Note that the size of 2727 // the chunks is unspecified in this case. 2728 CGOpenMPRuntime::StaticRTInput StaticInit( 2729 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 2730 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 2731 StaticChunkedOne ? 
Chunk : nullptr); 2732 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2733 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 2734 StaticInit); 2735 // UB = min(UB, GlobalUB); 2736 if (!StaticChunkedOne) 2737 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 2738 // IV = LB; 2739 CGF.EmitIgnoredExpr(S.getInit()); 2740 // For unchunked static schedule generate: 2741 // 2742 // while (idx <= UB) { 2743 // BODY; 2744 // ++idx; 2745 // } 2746 // 2747 // For static schedule with chunk one: 2748 // 2749 // while (IV <= PrevUB) { 2750 // BODY; 2751 // IV += ST; 2752 // } 2753 CGF.EmitOMPInnerLoop( 2754 S, LoopScope.requiresCleanups(), 2755 StaticChunkedOne ? S.getCombinedParForInDistCond() 2756 : S.getCond(), 2757 StaticChunkedOne ? S.getDistInc() : S.getInc(), 2758 [&S, LoopExit](CodeGenFunction &CGF) { 2759 CGF.EmitOMPLoopBody(S, LoopExit); 2760 CGF.EmitStopPoint(&S); 2761 }, 2762 [](CodeGenFunction &) {}); 2763 }); 2764 EmitBlock(LoopExit.getBlock()); 2765 // Tell the runtime we are done. 2766 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2767 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2768 S.getDirectiveKind()); 2769 }; 2770 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2771 } else { 2772 const bool IsMonotonic = 2773 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static || 2774 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown || 2775 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 2776 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 2777 // Emit the outer loop, which requests its work chunk [LB..UB] from 2778 // runtime and runs the inner loop to process it. 2779 const OMPLoopArguments LoopArguments( 2780 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 2781 IL.getAddress(*this), Chunk, EUB); 2782 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2783 LoopArguments, CGDispatchBounds); 2784 } 2785 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2786 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 2787 return CGF.Builder.CreateIsNotNull( 2788 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2789 }); 2790 } 2791 EmitOMPReductionClauseFinal( 2792 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2793 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2794 : /*Parallel only*/ OMPD_parallel); 2795 // Emit post-update of the reduction variables if IsLastIter != 0. 2796 emitPostUpdateForReductionClause( 2797 *this, S, [IL, &S](CodeGenFunction &CGF) { 2798 return CGF.Builder.CreateIsNotNull( 2799 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2800 }); 2801 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2802 if (HasLastprivateClause) 2803 EmitOMPLastprivateClauseFinal( 2804 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2805 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 2806 } 2807 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 2808 return CGF.Builder.CreateIsNotNull( 2809 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2810 }); 2811 DoacrossCleanupScope.ForceCleanup(); 2812 // We're now done with the loop, so jump to the continuation block. 2813 if (ContBlock) { 2814 EmitBranch(ContBlock); 2815 EmitBlock(ContBlock, /*IsFinished=*/true); 2816 } 2817 } 2818 return HasLastprivateClause; 2819 } 2820 2821 /// The following two functions generate expressions for the loop lower 2822 /// and upper bounds in case of static and dynamic (dispatch) schedule 2823 /// of the associated 'for' or 'distribute' loop. 
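/// For a static schedule the bounds are the lower/upper bound helper
/// variables created by Sema, while for a dispatch (dynamic, guided) schedule
/// the runtime init call receives the constants 0 and LastIteration instead.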
2824 static std::pair<LValue, LValue> 2825 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 2826 const auto &LS = cast<OMPLoopDirective>(S); 2827 LValue LB = 2828 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2829 LValue UB = 2830 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2831 return {LB, UB}; 2832 } 2833 2834 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not 2835 /// consider the lower and upper bound expressions generated by the 2836 /// worksharing loop support, but we use 0 and the iteration space size as 2837 /// constants 2838 static std::pair<llvm::Value *, llvm::Value *> 2839 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, 2840 Address LB, Address UB) { 2841 const auto &LS = cast<OMPLoopDirective>(S); 2842 const Expr *IVExpr = LS.getIterationVariable(); 2843 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType()); 2844 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0); 2845 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration()); 2846 return {LBVal, UBVal}; 2847 } 2848 2849 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 2850 bool HasLastprivates = false; 2851 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 2852 PrePostActionTy &) { 2853 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); 2854 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 2855 emitForLoopBounds, 2856 emitDispatchForLoopBounds); 2857 }; 2858 { 2859 auto LPCRegion = 2860 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2861 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2862 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 2863 S.hasCancel()); 2864 } 2865 2866 // Emit an implicit barrier at the end. 2867 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 2868 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 2869 // Check for outer lastprivate conditional update. 2870 checkForLastprivateConditionalUpdate(*this, S); 2871 } 2872 2873 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 2874 bool HasLastprivates = false; 2875 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 2876 PrePostActionTy &) { 2877 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 2878 emitForLoopBounds, 2879 emitDispatchForLoopBounds); 2880 }; 2881 { 2882 auto LPCRegion = 2883 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2884 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2885 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2886 } 2887 2888 // Emit an implicit barrier at the end. 2889 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 2890 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 2891 // Check for outer lastprivate conditional update. 
2892 checkForLastprivateConditionalUpdate(*this, S); 2893 } 2894 2895 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 2896 const Twine &Name, 2897 llvm::Value *Init = nullptr) { 2898 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 2899 if (Init) 2900 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 2901 return LVal; 2902 } 2903 2904 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 2905 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 2906 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 2907 bool HasLastprivates = false; 2908 auto &&CodeGen = [&S, CapturedStmt, CS, 2909 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 2910 ASTContext &C = CGF.getContext(); 2911 QualType KmpInt32Ty = 2912 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 2913 // Emit helper vars inits. 2914 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 2915 CGF.Builder.getInt32(0)); 2916 llvm::ConstantInt *GlobalUBVal = CS != nullptr 2917 ? CGF.Builder.getInt32(CS->size() - 1) 2918 : CGF.Builder.getInt32(0); 2919 LValue UB = 2920 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 2921 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 2922 CGF.Builder.getInt32(1)); 2923 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 2924 CGF.Builder.getInt32(0)); 2925 // Loop counter. 2926 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 2927 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 2928 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 2929 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 2930 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 2931 // Generate condition for loop. 2932 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, 2933 OK_Ordinary, S.getBeginLoc(), FPOptions()); 2934 // Increment for loop counter. 2935 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 2936 S.getBeginLoc(), true); 2937 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 2938 // Iterate through all sections and emit a switch construct: 2939 // switch (IV) { 2940 // case 0: 2941 // <SectionStmt[0]>; 2942 // break; 2943 // ... 2944 // case <NumSection> - 1: 2945 // <SectionStmt[<NumSection> - 1]>; 2946 // break; 2947 // } 2948 // .omp.sections.exit: 2949 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 2950 llvm::SwitchInst *SwitchStmt = 2951 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 2952 ExitBB, CS == nullptr ? 
1 : CS->size()); 2953 if (CS) { 2954 unsigned CaseNumber = 0; 2955 for (const Stmt *SubStmt : CS->children()) { 2956 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 2957 CGF.EmitBlock(CaseBB); 2958 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 2959 CGF.EmitStmt(SubStmt); 2960 CGF.EmitBranch(ExitBB); 2961 ++CaseNumber; 2962 } 2963 } else { 2964 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 2965 CGF.EmitBlock(CaseBB); 2966 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 2967 CGF.EmitStmt(CapturedStmt); 2968 CGF.EmitBranch(ExitBB); 2969 } 2970 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 2971 }; 2972 2973 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2974 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 2975 // Emit implicit barrier to synchronize threads and avoid data races on 2976 // initialization of firstprivate variables and post-update of lastprivate 2977 // variables. 2978 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 2979 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2980 /*ForceSimpleCall=*/true); 2981 } 2982 CGF.EmitOMPPrivateClause(S, LoopScope); 2983 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 2984 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2985 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2986 (void)LoopScope.Privatize(); 2987 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2988 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2989 2990 // Emit static non-chunked loop. 2991 OpenMPScheduleTy ScheduleKind; 2992 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 2993 CGOpenMPRuntime::StaticRTInput StaticInit( 2994 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 2995 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 2996 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2997 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 2998 // UB = min(UB, GlobalUB); 2999 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3000 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3001 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3002 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3003 // IV = LB; 3004 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3005 // while (idx <= UB) { BODY; ++idx; } 3006 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen, 3007 [](CodeGenFunction &) {}); 3008 // Tell the runtime we are done. 3009 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3010 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3011 S.getDirectiveKind()); 3012 }; 3013 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3014 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3015 // Emit post-update of the reduction variables if IsLastIter != 0. 3016 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3017 return CGF.Builder.CreateIsNotNull( 3018 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3019 }); 3020 3021 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
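// (IL is the is-last flag whose address was passed to the static-init
// runtime call above; it is nonzero only for the thread that executed the
// last section.)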
3022 if (HasLastprivates) 3023 CGF.EmitOMPLastprivateClauseFinal( 3024 S, /*NoFinals=*/false, 3025 CGF.Builder.CreateIsNotNull( 3026 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3027 }; 3028 3029 bool HasCancel = false; 3030 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3031 HasCancel = OSD->hasCancel(); 3032 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3033 HasCancel = OPSD->hasCancel(); 3034 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3035 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3036 HasCancel); 3037 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3038 // clause. Otherwise the barrier will be generated by the codegen for the 3039 // directive. 3040 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3041 // Emit implicit barrier to synchronize threads and avoid data races on 3042 // initialization of firstprivate variables. 3043 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3044 OMPD_unknown); 3045 } 3046 } 3047 3048 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3049 { 3050 auto LPCRegion = 3051 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3052 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3053 EmitSections(S); 3054 } 3055 // Emit an implicit barrier at the end. 3056 if (!S.getSingleClause<OMPNowaitClause>()) { 3057 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3058 OMPD_sections); 3059 } 3060 // Check for outer lastprivate conditional update. 3061 checkForLastprivateConditionalUpdate(*this, S); 3062 } 3063 3064 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3065 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3066 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3067 }; 3068 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3069 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen, 3070 S.hasCancel()); 3071 } 3072 3073 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3074 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3075 llvm::SmallVector<const Expr *, 8> DestExprs; 3076 llvm::SmallVector<const Expr *, 8> SrcExprs; 3077 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3078 // Check if there are any 'copyprivate' clauses associated with this 3079 // 'single' construct. 
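// A 'copyprivate' clause, e.g. '#pragma omp single copyprivate(x)',
// broadcasts the value of x from the thread that executed the 'single'
// region to all other threads in the team.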
3080 // Build a list of copyprivate variables along with helper expressions 3081 // (<source>, <destination>, <destination>=<source> expressions) 3082 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3083 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3084 DestExprs.append(C->destination_exprs().begin(), 3085 C->destination_exprs().end()); 3086 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3087 AssignmentOps.append(C->assignment_ops().begin(), 3088 C->assignment_ops().end()); 3089 } 3090 // Emit code for 'single' region along with 'copyprivate' clauses 3091 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3092 Action.Enter(CGF); 3093 OMPPrivateScope SingleScope(CGF); 3094 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3095 CGF.EmitOMPPrivateClause(S, SingleScope); 3096 (void)SingleScope.Privatize(); 3097 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3098 }; 3099 { 3100 auto LPCRegion = 3101 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3102 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3103 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3104 CopyprivateVars, DestExprs, 3105 SrcExprs, AssignmentOps); 3106 } 3107 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3108 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3109 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3110 CGM.getOpenMPRuntime().emitBarrierCall( 3111 *this, S.getBeginLoc(), 3112 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3113 } 3114 // Check for outer lastprivate conditional update. 3115 checkForLastprivateConditionalUpdate(*this, S); 3116 } 3117 3118 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3119 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3120 Action.Enter(CGF); 3121 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3122 }; 3123 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3124 } 3125 3126 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3127 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3128 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3129 3130 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3131 const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt(); 3132 3133 auto FiniCB = [this](InsertPointTy IP) { 3134 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3135 }; 3136 3137 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3138 InsertPointTy CodeGenIP, 3139 llvm::BasicBlock &FiniBB) { 3140 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3141 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3142 CodeGenIP, FiniBB); 3143 }; 3144 3145 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3146 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3147 Builder.restoreIP(OMPBuilder->CreateMaster(Builder, BodyGenCB, FiniCB)); 3148 3149 return; 3150 } 3151 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3152 emitMaster(*this, S); 3153 } 3154 3155 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 3156 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3157 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3158 3159 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3160 const Stmt 
*CriticalRegionBodyStmt = CS->getCapturedStmt(); 3161 const Expr *Hint = nullptr; 3162 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3163 Hint = HintClause->getHint(); 3164 3165 // TODO: This is slightly different from what's currently being done in 3166 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 3167 // about typing is final. 3168 llvm::Value *HintInst = nullptr; 3169 if (Hint) 3170 HintInst = 3171 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 3172 3173 auto FiniCB = [this](InsertPointTy IP) { 3174 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3175 }; 3176 3177 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 3178 InsertPointTy CodeGenIP, 3179 llvm::BasicBlock &FiniBB) { 3180 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3181 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 3182 CodeGenIP, FiniBB); 3183 }; 3184 3185 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3186 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3187 Builder.restoreIP(OMPBuilder->CreateCritical( 3188 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 3189 HintInst)); 3190 3191 return; 3192 } 3193 3194 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3195 Action.Enter(CGF); 3196 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3197 }; 3198 const Expr *Hint = nullptr; 3199 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3200 Hint = HintClause->getHint(); 3201 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3202 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 3203 S.getDirectiveName().getAsString(), 3204 CodeGen, S.getBeginLoc(), Hint); 3205 } 3206 3207 void CodeGenFunction::EmitOMPParallelForDirective( 3208 const OMPParallelForDirective &S) { 3209 // Emit directive as a combined directive that consists of two implicit 3210 // directives: 'parallel' with 'for' directive. 3211 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3212 Action.Enter(CGF); 3213 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel()); 3214 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 3215 emitDispatchForLoopBounds); 3216 }; 3217 { 3218 auto LPCRegion = 3219 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3220 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 3221 emitEmptyBoundParameters); 3222 } 3223 // Check for outer lastprivate conditional update. 3224 checkForLastprivateConditionalUpdate(*this, S); 3225 } 3226 3227 void CodeGenFunction::EmitOMPParallelForSimdDirective( 3228 const OMPParallelForSimdDirective &S) { 3229 // Emit directive as a combined directive that consists of two implicit 3230 // directives: 'parallel' with 'for' directive. 3231 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3232 Action.Enter(CGF); 3233 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 3234 emitDispatchForLoopBounds); 3235 }; 3236 { 3237 auto LPCRegion = 3238 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3239 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen, 3240 emitEmptyBoundParameters); 3241 } 3242 // Check for outer lastprivate conditional update. 
3243 checkForLastprivateConditionalUpdate(*this, S);
3244 }
3245
3246 void CodeGenFunction::EmitOMPParallelMasterDirective(
3247 const OMPParallelMasterDirective &S) {
3248 // Emit directive as a combined directive that consists of two implicit
3249 // directives: 'parallel' with 'master' directive.
3250 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3251 Action.Enter(CGF);
3252 OMPPrivateScope PrivateScope(CGF);
3253 bool Copyins = CGF.EmitOMPCopyinClause(S);
3254 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3255 if (Copyins) {
3256 // Emit an implicit barrier to synchronize threads and avoid data races
3257 // when propagating the master thread's values of threadprivate variables
3258 // to the local instances of those variables in all other implicit threads.
3259 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3260 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3261 /*ForceSimpleCall=*/true);
3262 }
3263 CGF.EmitOMPPrivateClause(S, PrivateScope);
3264 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3265 (void)PrivateScope.Privatize();
3266 emitMaster(CGF, S);
3267 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
3268 };
3269 {
3270 auto LPCRegion =
3271 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3272 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
3273 emitEmptyBoundParameters);
3274 emitPostUpdateForReductionClause(*this, S,
3275 [](CodeGenFunction &) { return nullptr; });
3276 }
3277 // Check for outer lastprivate conditional update.
3278 checkForLastprivateConditionalUpdate(*this, S);
3279 }
3280
3281 void CodeGenFunction::EmitOMPParallelSectionsDirective(
3282 const OMPParallelSectionsDirective &S) {
3283 // Emit directive as a combined directive that consists of two implicit
3284 // directives: 'parallel' with 'sections' directive.
3285 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3286 Action.Enter(CGF);
3287 CGF.EmitSections(S);
3288 };
3289 {
3290 auto LPCRegion =
3291 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3292 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
3293 emitEmptyBoundParameters);
3294 }
3295 // Check for outer lastprivate conditional update.
3296 checkForLastprivateConditionalUpdate(*this, S);
3297 }
3298
3299 void CodeGenFunction::EmitOMPTaskBasedDirective(
3300 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
3301 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
3302 OMPTaskDataTy &Data) {
3303 // Emit outlined function for task construct.
3304 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
3305 auto I = CS->getCapturedDecl()->param_begin();
3306 auto PartId = std::next(I);
3307 auto TaskT = std::next(I, 4);
3308 // Check if the task is final.
3309 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
3310 // If the condition constant folds and can be elided, try to avoid emitting
3311 // the condition and the dead arm of the if/else.
3312 const Expr *Cond = Clause->getCondition();
3313 bool CondConstant;
3314 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
3315 Data.Final.setInt(CondConstant);
3316 else
3317 Data.Final.setPointer(EvaluateExprAsBool(Cond));
3318 } else {
3319 // By default the task is not final.
3320 Data.Final.setInt(/*IntVal=*/false);
3321 }
3322 // Check if the task has a 'priority' clause.
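// If present, e.g. 'priority(p)', the priority value is evaluated and
// converted to a 32-bit integer for the runtime.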
3323 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 3324 const Expr *Prio = Clause->getPriority(); 3325 Data.Priority.setInt(/*IntVal=*/true); 3326 Data.Priority.setPointer(EmitScalarConversion( 3327 EmitScalarExpr(Prio), Prio->getType(), 3328 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 3329 Prio->getExprLoc())); 3330 } 3331 // The first function argument for tasks is a thread id, the second one is a 3332 // part id (0 for tied tasks, >=0 for untied task). 3333 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 3334 // Get list of private variables. 3335 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 3336 auto IRef = C->varlist_begin(); 3337 for (const Expr *IInit : C->private_copies()) { 3338 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3339 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3340 Data.PrivateVars.push_back(*IRef); 3341 Data.PrivateCopies.push_back(IInit); 3342 } 3343 ++IRef; 3344 } 3345 } 3346 EmittedAsPrivate.clear(); 3347 // Get list of firstprivate variables. 3348 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3349 auto IRef = C->varlist_begin(); 3350 auto IElemInitRef = C->inits().begin(); 3351 for (const Expr *IInit : C->private_copies()) { 3352 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3353 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3354 Data.FirstprivateVars.push_back(*IRef); 3355 Data.FirstprivateCopies.push_back(IInit); 3356 Data.FirstprivateInits.push_back(*IElemInitRef); 3357 } 3358 ++IRef; 3359 ++IElemInitRef; 3360 } 3361 } 3362 // Get list of lastprivate variables (for taskloops). 3363 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 3364 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 3365 auto IRef = C->varlist_begin(); 3366 auto ID = C->destination_exprs().begin(); 3367 for (const Expr *IInit : C->private_copies()) { 3368 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3369 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3370 Data.LastprivateVars.push_back(*IRef); 3371 Data.LastprivateCopies.push_back(IInit); 3372 } 3373 LastprivateDstsOrigs.insert( 3374 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 3375 cast<DeclRefExpr>(*IRef)}); 3376 ++IRef; 3377 ++ID; 3378 } 3379 } 3380 SmallVector<const Expr *, 4> LHSs; 3381 SmallVector<const Expr *, 4> RHSs; 3382 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3383 auto IPriv = C->privates().begin(); 3384 auto IRed = C->reduction_ops().begin(); 3385 auto ILHS = C->lhs_exprs().begin(); 3386 auto IRHS = C->rhs_exprs().begin(); 3387 for (const Expr *Ref : C->varlists()) { 3388 Data.ReductionVars.emplace_back(Ref); 3389 Data.ReductionCopies.emplace_back(*IPriv); 3390 Data.ReductionOps.emplace_back(*IRed); 3391 LHSs.emplace_back(*ILHS); 3392 RHSs.emplace_back(*IRHS); 3393 std::advance(IPriv, 1); 3394 std::advance(IRed, 1); 3395 std::advance(ILHS, 1); 3396 std::advance(IRHS, 1); 3397 } 3398 } 3399 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 3400 *this, S.getBeginLoc(), LHSs, RHSs, Data); 3401 // Build list of dependences. 
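// Each 'depend' clause contributes one (kind, expression) pair per list
// item; e.g. 'depend(in: a, b)' yields (in, a) and (in, b).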
3402 for (const auto *C : S.getClausesOfKind<OMPDependClause>())
3403 for (const Expr *IRef : C->varlists())
3404 Data.Dependences.emplace_back(C->getDependencyKind(), IRef);
3405 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
3406 CapturedRegion](CodeGenFunction &CGF,
3407 PrePostActionTy &Action) {
3408 // Set proper addresses for generated private copies.
3409 OMPPrivateScope Scope(CGF);
3410 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
3411 !Data.LastprivateVars.empty()) {
3412 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
3413 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
3414 enum { PrivatesParam = 2, CopyFnParam = 3 };
3415 llvm::Value *CopyFn = CGF.Builder.CreateLoad(
3416 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
3417 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
3418 CS->getCapturedDecl()->getParam(PrivatesParam)));
3419 // Map privates.
3420 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
3421 llvm::SmallVector<llvm::Value *, 16> CallArgs;
3422 CallArgs.push_back(PrivatesPtr);
3423 for (const Expr *E : Data.PrivateVars) {
3424 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3425 Address PrivatePtr = CGF.CreateMemTemp(
3426 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
3427 PrivatePtrs.emplace_back(VD, PrivatePtr);
3428 CallArgs.push_back(PrivatePtr.getPointer());
3429 }
3430 for (const Expr *E : Data.FirstprivateVars) {
3431 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3432 Address PrivatePtr =
3433 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
3434 ".firstpriv.ptr.addr");
3435 PrivatePtrs.emplace_back(VD, PrivatePtr);
3436 CallArgs.push_back(PrivatePtr.getPointer());
3437 }
3438 for (const Expr *E : Data.LastprivateVars) {
3439 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
3440 Address PrivatePtr =
3441 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
3442 ".lastpriv.ptr.addr");
3443 PrivatePtrs.emplace_back(VD, PrivatePtr);
3444 CallArgs.push_back(PrivatePtr.getPointer());
3445 }
3446 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3447 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
3448 for (const auto &Pair : LastprivateDstsOrigs) {
3449 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
3450 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
3451 /*RefersToEnclosingVariableOrCapture=*/
3452 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
3453 Pair.second->getType(), VK_LValue,
3454 Pair.second->getExprLoc());
3455 Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
3456 return CGF.EmitLValue(&DRE).getAddress(CGF);
3457 });
3458 }
3459 for (const auto &Pair : PrivatePtrs) {
3460 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
3461 CGF.getContext().getDeclAlign(Pair.first));
3462 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
3463 }
3464 }
3465 if (Data.Reductions) {
3466 OMPLexicalScope LexScope(CGF, S, CapturedRegion);
3467 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies,
3468 Data.ReductionOps);
3469 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
3470 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
3471 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
3472 RedCG.emitSharedLValue(CGF, Cnt);
3473 RedCG.emitAggregateType(CGF, Cnt);
3474 // FIXME: This must be removed once the runtime library is fixed.
3475 // Emit required threadprivate variables for
3476 // initializer/combiner/finalizer.
3477 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3478 RedCG, Cnt);
3479 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3480 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3481 Replacement =
3482 Address(CGF.EmitScalarConversion(
3483 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3484 CGF.getContext().getPointerType(
3485 Data.ReductionCopies[Cnt]->getType()),
3486 Data.ReductionCopies[Cnt]->getExprLoc()),
3487 Replacement.getAlignment());
3488 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3489 Scope.addPrivate(RedCG.getBaseDecl(Cnt),
3490 [Replacement]() { return Replacement; });
3491 }
3492 }
3493 // Privatize all private variables except for in_reduction items.
3494 (void)Scope.Privatize();
3495 SmallVector<const Expr *, 4> InRedVars;
3496 SmallVector<const Expr *, 4> InRedPrivs;
3497 SmallVector<const Expr *, 4> InRedOps;
3498 SmallVector<const Expr *, 4> TaskgroupDescriptors;
3499 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
3500 auto IPriv = C->privates().begin();
3501 auto IRed = C->reduction_ops().begin();
3502 auto ITD = C->taskgroup_descriptors().begin();
3503 for (const Expr *Ref : C->varlists()) {
3504 InRedVars.emplace_back(Ref);
3505 InRedPrivs.emplace_back(*IPriv);
3506 InRedOps.emplace_back(*IRed);
3507 TaskgroupDescriptors.emplace_back(*ITD);
3508 std::advance(IPriv, 1);
3509 std::advance(IRed, 1);
3510 std::advance(ITD, 1);
3511 }
3512 }
3513 // Privatize in_reduction items here, because taskgroup descriptors must be
3514 // privatized earlier.
3515 OMPPrivateScope InRedScope(CGF);
3516 if (!InRedVars.empty()) {
3517 ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
3518 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
3519 RedCG.emitSharedLValue(CGF, Cnt);
3520 RedCG.emitAggregateType(CGF, Cnt);
3521 // The taskgroup descriptor variable is always implicitly firstprivate
3522 // and has already been privatized while processing the firstprivates.
3523 // FIXME: This must be removed once the runtime library is fixed.
3524 // Emit required threadprivate variables for
3525 // initializer/combiner/finalizer.
3526 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 3527 RedCG, Cnt); 3528 llvm::Value *ReductionsPtr = 3529 CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]), 3530 TaskgroupDescriptors[Cnt]->getExprLoc()); 3531 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 3532 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 3533 Replacement = Address( 3534 CGF.EmitScalarConversion( 3535 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 3536 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 3537 InRedPrivs[Cnt]->getExprLoc()), 3538 Replacement.getAlignment()); 3539 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 3540 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), 3541 [Replacement]() { return Replacement; }); 3542 } 3543 } 3544 (void)InRedScope.Privatize(); 3545 3546 Action.Enter(CGF); 3547 BodyGen(CGF); 3548 }; 3549 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3550 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 3551 Data.NumberOfParts); 3552 OMPLexicalScope Scope(*this, S, llvm::None, 3553 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3554 !isOpenMPSimdDirective(S.getDirectiveKind())); 3555 TaskGen(*this, OutlinedFn, Data); 3556 } 3557 3558 static ImplicitParamDecl * 3559 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 3560 QualType Ty, CapturedDecl *CD, 3561 SourceLocation Loc) { 3562 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3563 ImplicitParamDecl::Other); 3564 auto *OrigRef = DeclRefExpr::Create( 3565 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 3566 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3567 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3568 ImplicitParamDecl::Other); 3569 auto *PrivateRef = DeclRefExpr::Create( 3570 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 3571 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3572 QualType ElemType = C.getBaseElementType(Ty); 3573 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 3574 ImplicitParamDecl::Other); 3575 auto *InitRef = DeclRefExpr::Create( 3576 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 3577 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 3578 PrivateVD->setInitStyle(VarDecl::CInit); 3579 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 3580 InitRef, /*BasePath=*/nullptr, 3581 VK_RValue)); 3582 Data.FirstprivateVars.emplace_back(OrigRef); 3583 Data.FirstprivateCopies.emplace_back(PrivateRef); 3584 Data.FirstprivateInits.emplace_back(InitRef); 3585 return OrigVD; 3586 } 3587 3588 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 3589 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 3590 OMPTargetDataInfo &InputInfo) { 3591 // Emit outlined function for task construct. 3592 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3593 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3594 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 3595 auto I = CS->getCapturedDecl()->param_begin(); 3596 auto PartId = std::next(I); 3597 auto TaskT = std::next(I, 4); 3598 OMPTaskDataTy Data; 3599 // The task is not final. 3600 Data.Final.setInt(/*IntVal=*/false); 3601 // Get list of firstprivate variables. 
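// Each list item contributes the original variable reference, its private
// copy, and the initializer expression Sema built for it.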
3602 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3603 auto IRef = C->varlist_begin(); 3604 auto IElemInitRef = C->inits().begin(); 3605 for (auto *IInit : C->private_copies()) { 3606 Data.FirstprivateVars.push_back(*IRef); 3607 Data.FirstprivateCopies.push_back(IInit); 3608 Data.FirstprivateInits.push_back(*IElemInitRef); 3609 ++IRef; 3610 ++IElemInitRef; 3611 } 3612 } 3613 OMPPrivateScope TargetScope(*this); 3614 VarDecl *BPVD = nullptr; 3615 VarDecl *PVD = nullptr; 3616 VarDecl *SVD = nullptr; 3617 if (InputInfo.NumberOfTargetItems > 0) { 3618 auto *CD = CapturedDecl::Create( 3619 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 3620 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 3621 QualType BaseAndPointersType = getContext().getConstantArrayType( 3622 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 3623 /*IndexTypeQuals=*/0); 3624 BPVD = createImplicitFirstprivateForType( 3625 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 3626 PVD = createImplicitFirstprivateForType( 3627 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 3628 QualType SizesType = getContext().getConstantArrayType( 3629 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 3630 ArrSize, nullptr, ArrayType::Normal, 3631 /*IndexTypeQuals=*/0); 3632 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 3633 S.getBeginLoc()); 3634 TargetScope.addPrivate( 3635 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 3636 TargetScope.addPrivate(PVD, 3637 [&InputInfo]() { return InputInfo.PointersArray; }); 3638 TargetScope.addPrivate(SVD, 3639 [&InputInfo]() { return InputInfo.SizesArray; }); 3640 } 3641 (void)TargetScope.Privatize(); 3642 // Build list of dependences. 3643 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) 3644 for (const Expr *IRef : C->varlists()) 3645 Data.Dependences.emplace_back(C->getDependencyKind(), IRef); 3646 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, 3647 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 3648 // Set proper addresses for generated private copies. 3649 OMPPrivateScope Scope(CGF); 3650 if (!Data.FirstprivateVars.empty()) { 3651 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 3652 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 3653 enum { PrivatesParam = 2, CopyFnParam = 3 }; 3654 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 3655 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 3656 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 3657 CS->getCapturedDecl()->getParam(PrivatesParam))); 3658 // Map privates. 
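// Each private gets a pointer-typed temporary that the task's generated
// copy function fills in with the address of the actual private copy.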
3659 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 3660 llvm::SmallVector<llvm::Value *, 16> CallArgs; 3661 CallArgs.push_back(PrivatesPtr); 3662 for (const Expr *E : Data.FirstprivateVars) { 3663 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3664 Address PrivatePtr = 3665 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3666 ".firstpriv.ptr.addr"); 3667 PrivatePtrs.emplace_back(VD, PrivatePtr); 3668 CallArgs.push_back(PrivatePtr.getPointer()); 3669 } 3670 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 3671 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 3672 for (const auto &Pair : PrivatePtrs) { 3673 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3674 CGF.getContext().getDeclAlign(Pair.first)); 3675 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 3676 } 3677 } 3678 // Privatize all private variables except for in_reduction items. 3679 (void)Scope.Privatize(); 3680 if (InputInfo.NumberOfTargetItems > 0) { 3681 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 3682 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 3683 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 3684 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 3685 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 3686 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 3687 } 3688 3689 Action.Enter(CGF); 3690 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 3691 BodyGen(CGF); 3692 }; 3693 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3694 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 3695 Data.NumberOfParts); 3696 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 3697 IntegerLiteral IfCond(getContext(), TrueOrFalse, 3698 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 3699 SourceLocation()); 3700 3701 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 3702 SharedsTy, CapturedStruct, &IfCond, Data); 3703 } 3704 3705 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 3706 // Emit outlined function for task construct. 3707 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3708 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3709 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 3710 const Expr *IfCond = nullptr; 3711 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 3712 if (C->getNameModifier() == OMPD_unknown || 3713 C->getNameModifier() == OMPD_task) { 3714 IfCond = C->getCondition(); 3715 break; 3716 } 3717 } 3718 3719 OMPTaskDataTy Data; 3720 // Check if we should emit tied or untied task. 
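// A task is tied by default; only an explicit 'untied' clause makes it
// untied.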
3721 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 3722 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 3723 CGF.EmitStmt(CS->getCapturedStmt()); 3724 }; 3725 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 3726 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 3727 const OMPTaskDataTy &Data) { 3728 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 3729 SharedsTy, CapturedStruct, IfCond, 3730 Data); 3731 }; 3732 auto LPCRegion = 3733 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3734 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 3735 } 3736 3737 void CodeGenFunction::EmitOMPTaskyieldDirective( 3738 const OMPTaskyieldDirective &S) { 3739 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 3740 } 3741 3742 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 3743 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 3744 } 3745 3746 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 3747 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 3748 } 3749 3750 void CodeGenFunction::EmitOMPTaskgroupDirective( 3751 const OMPTaskgroupDirective &S) { 3752 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3753 Action.Enter(CGF); 3754 if (const Expr *E = S.getReductionRef()) { 3755 SmallVector<const Expr *, 4> LHSs; 3756 SmallVector<const Expr *, 4> RHSs; 3757 OMPTaskDataTy Data; 3758 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 3759 auto IPriv = C->privates().begin(); 3760 auto IRed = C->reduction_ops().begin(); 3761 auto ILHS = C->lhs_exprs().begin(); 3762 auto IRHS = C->rhs_exprs().begin(); 3763 for (const Expr *Ref : C->varlists()) { 3764 Data.ReductionVars.emplace_back(Ref); 3765 Data.ReductionCopies.emplace_back(*IPriv); 3766 Data.ReductionOps.emplace_back(*IRed); 3767 LHSs.emplace_back(*ILHS); 3768 RHSs.emplace_back(*IRHS); 3769 std::advance(IPriv, 1); 3770 std::advance(IRed, 1); 3771 std::advance(ILHS, 1); 3772 std::advance(IRHS, 1); 3773 } 3774 } 3775 llvm::Value *ReductionDesc = 3776 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 3777 LHSs, RHSs, Data); 3778 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3779 CGF.EmitVarDecl(*VD); 3780 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 3781 /*Volatile=*/false, E->getType()); 3782 } 3783 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3784 }; 3785 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3786 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 3787 } 3788 3789 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 3790 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 3791 ? 
llvm::AtomicOrdering::NotAtomic
3792 : llvm::AtomicOrdering::AcquireRelease;
3793 CGM.getOpenMPRuntime().emitFlush(
3794 *this,
3795 [&S]() -> ArrayRef<const Expr *> {
3796 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
3797 return llvm::makeArrayRef(FlushClause->varlist_begin(),
3798 FlushClause->varlist_end());
3799 return llvm::None;
3800 }(),
3801 S.getBeginLoc(), AO);
3802 }
3803
3804 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
3805 const auto *DO = S.getSingleClause<OMPDepobjClause>();
3806 LValue DOLVal = EmitLValue(DO->getDepobj());
3807 if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
3808 SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4>
3809 Dependencies;
3810 for (const Expr *IRef : DC->varlists())
3811 Dependencies.emplace_back(DC->getDependencyKind(), IRef);
3812 Address DepAddr = CGM.getOpenMPRuntime().emitDependClause(
3813 *this, Dependencies, /*ForDepobj=*/true, DC->getBeginLoc()).second;
3814 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
3815 return;
3816 }
3817 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
3818 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
3819 return;
3820 }
3821 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
3822 CGM.getOpenMPRuntime().emitUpdateClause(
3823 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
3824 return;
3825 }
3826 }
3827
3828 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
3829 const CodeGenLoopTy &CodeGenLoop,
3830 Expr *IncExpr) {
3831 // Emit the loop iteration variable.
3832 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3833 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
3834 EmitVarDecl(*IVDecl);
3835
3836 // Emit the iteration count variable.
3837 // If it is not a variable, Sema decided to calculate the iteration count on
3838 // each iteration (e.g., it is foldable into a constant).
3839 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3840 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3841 // Emit the calculation of the iteration count.
3842 EmitIgnoredExpr(S.getCalcLastIteration());
3843 }
3844
3845 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3846
3847 bool HasLastprivateClause = false;
3848 // Check pre-condition.
3849 {
3850 OMPLoopScope PreInitScope(*this, S);
3851 // Skip the entire loop if we don't meet the precondition.
3852 // If the condition constant folds and can be elided, avoid emitting the
3853 // whole loop.
3854 bool CondConstant;
3855 llvm::BasicBlock *ContBlock = nullptr;
3856 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3857 if (!CondConstant)
3858 return;
3859 } else {
3860 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
3861 ContBlock = createBasicBlock("omp.precond.end");
3862 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3863 getProfileCount(&S));
3864 EmitBlock(ThenBlock);
3865 incrementProfileCounter(&S);
3866 }
3867
3868 emitAlignedClause(*this, S);
3869 // Emit 'then' code.
3870 {
3871 // Emit helper vars inits.
3872
3873 LValue LB = EmitOMPHelperVar(
3874 *this, cast<DeclRefExpr>(
3875 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3876 ? S.getCombinedLowerBoundVariable()
3877 : S.getLowerBoundVariable())));
3878 LValue UB = EmitOMPHelperVar(
3879 *this, cast<DeclRefExpr>(
3880 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3881 ?
S.getCombinedUpperBoundVariable() 3882 : S.getUpperBoundVariable()))); 3883 LValue ST = 3884 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3885 LValue IL = 3886 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3887 3888 OMPPrivateScope LoopScope(*this); 3889 if (EmitOMPFirstprivateClause(S, LoopScope)) { 3890 // Emit implicit barrier to synchronize threads and avoid data races 3891 // on initialization of firstprivate variables and post-update of 3892 // lastprivate variables. 3893 CGM.getOpenMPRuntime().emitBarrierCall( 3894 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3895 /*ForceSimpleCall=*/true); 3896 } 3897 EmitOMPPrivateClause(S, LoopScope); 3898 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 3899 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3900 !isOpenMPTeamsDirective(S.getDirectiveKind())) 3901 EmitOMPReductionClauseInit(S, LoopScope); 3902 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3903 EmitOMPPrivateLoopCounters(S, LoopScope); 3904 (void)LoopScope.Privatize(); 3905 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3906 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3907 3908 // Detect the distribute schedule kind and chunk. 3909 llvm::Value *Chunk = nullptr; 3910 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 3911 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 3912 ScheduleKind = C->getDistScheduleKind(); 3913 if (const Expr *Ch = C->getChunkSize()) { 3914 Chunk = EmitScalarExpr(Ch); 3915 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 3916 S.getIterationVariable()->getType(), 3917 S.getBeginLoc()); 3918 } 3919 } else { 3920 // Default behaviour for dist_schedule clause. 3921 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 3922 *this, S, ScheduleKind, Chunk); 3923 } 3924 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3925 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3926 3927 // OpenMP [2.10.8, distribute Construct, Description] 3928 // If dist_schedule is specified, kind must be static. If specified, 3929 // iterations are divided into chunks of size chunk_size, chunks are 3930 // assigned to the teams of the league in a round-robin fashion in the 3931 // order of the team number. When no chunk_size is specified, the 3932 // iteration space is divided into chunks that are approximately equal 3933 // in size, and at most one chunk is distributed to each team of the 3934 // league. The size of the chunks is unspecified in this case. 3935 bool StaticChunked = RT.isStaticChunked( 3936 ScheduleKind, /* Chunked */ Chunk != nullptr) && 3937 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3938 if (RT.isStaticNonchunked(ScheduleKind, 3939 /* Chunked */ Chunk != nullptr) || 3940 StaticChunked) { 3941 CGOpenMPRuntime::StaticRTInput StaticInit( 3942 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 3943 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3944 StaticChunked ? Chunk : nullptr); 3945 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 3946 StaticInit); 3947 JumpDest LoopExit = 3948 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3949 // UB = min(UB, GlobalUB); 3950 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3951 ? 
S.getCombinedEnsureUpperBound()
3952 : S.getEnsureUpperBound());
3953 // IV = LB;
3954 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3955 ? S.getCombinedInit()
3956 : S.getInit());
3957
3958 const Expr *Cond =
3959 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
3960 ? S.getCombinedCond()
3961 : S.getCond();
3962
3963 if (StaticChunked)
3964 Cond = S.getCombinedDistCond();
3965
3966 // For static unchunked schedules, generate:
3967 //
3968 // 1. For distribute alone, codegen
3969 // while (idx <= UB) {
3970 // BODY;
3971 // ++idx;
3972 // }
3973 //
3974 // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
3975 // while (idx <= UB) {
3976 // <CodeGen rest of pragma>(LB, UB);
3977 // idx += ST;
3978 // }
3979 //
3980 // For a static schedule with a chunk size of one, generate:
3981 //
3982 // while (IV <= GlobalUB) {
3983 // <CodeGen rest of pragma>(LB, UB);
3984 // LB += ST;
3985 // UB += ST;
3986 // UB = min(UB, GlobalUB);
3987 // IV = LB;
3988 // }
3989 //
3990 emitCommonSimdLoop(
3991 *this, S,
3992 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3993 if (isOpenMPSimdDirective(S.getDirectiveKind()))
3994 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
3995 },
3996 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
3997 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
3998 CGF.EmitOMPInnerLoop(
3999 S, LoopScope.requiresCleanups(), Cond, IncExpr,
4000 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
4001 CodeGenLoop(CGF, S, LoopExit);
4002 },
4003 [&S, StaticChunked](CodeGenFunction &CGF) {
4004 if (StaticChunked) {
4005 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
4006 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
4007 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
4008 CGF.EmitIgnoredExpr(S.getCombinedInit());
4009 }
4010 });
4011 });
4012 EmitBlock(LoopExit.getBlock());
4013 // Tell the runtime we are done.
4014 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
4015 } else {
4016 // Emit the outer loop, which requests its work chunk [LB..UB] from the
4017 // runtime and runs the inner loop to process it.
4018 const OMPLoopArguments LoopArguments = {
4019 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
4020 IL.getAddress(*this), Chunk};
4021 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
4022 CodeGenLoop);
4023 }
4024 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
4025 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
4026 return CGF.Builder.CreateIsNotNull(
4027 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
4028 });
4029 }
4030 if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
4031 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
4032 !isOpenMPTeamsDirective(S.getDirectiveKind())) {
4033 EmitOMPReductionClauseFinal(S, OMPD_simd);
4034 // Emit post-update of the reduction variables if IsLastIter != 0.
4035 emitPostUpdateForReductionClause(
4036 *this, S, [IL, &S](CodeGenFunction &CGF) {
4037 return CGF.Builder.CreateIsNotNull(
4038 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
4039 });
4040 }
4041 // Emit final copy of the lastprivate variables if IsLastIter != 0.
4042 if (HasLastprivateClause) {
4043 EmitOMPLastprivateClauseFinal(
4044 S, /*NoFinals=*/false,
4045 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
4046 }
4047 }
4048
4049 // We're now done with the loop, so jump to the continuation block.
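// ContBlock is non-null only when the precondition could not be
// constant-folded and an explicit precondition check was emitted above.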
4050 if (ContBlock) { 4051 EmitBranch(ContBlock); 4052 EmitBlock(ContBlock, true); 4053 } 4054 } 4055 } 4056 4057 void CodeGenFunction::EmitOMPDistributeDirective( 4058 const OMPDistributeDirective &S) { 4059 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4060 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4061 }; 4062 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4063 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 4064 } 4065 4066 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 4067 const CapturedStmt *S, 4068 SourceLocation Loc) { 4069 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 4070 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 4071 CGF.CapturedStmtInfo = &CapStmtInfo; 4072 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 4073 Fn->setDoesNotRecurse(); 4074 return Fn; 4075 } 4076 4077 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 4078 if (S.hasClausesOfKind<OMPDependClause>()) { 4079 assert(!S.getAssociatedStmt() && 4080 "No associated statement must be in ordered depend construct."); 4081 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 4082 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 4083 return; 4084 } 4085 const auto *C = S.getSingleClause<OMPSIMDClause>(); 4086 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 4087 PrePostActionTy &Action) { 4088 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 4089 if (C) { 4090 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4091 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4092 llvm::Function *OutlinedFn = 4093 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 4094 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 4095 OutlinedFn, CapturedVars); 4096 } else { 4097 Action.Enter(CGF); 4098 CGF.EmitStmt(CS->getCapturedStmt()); 4099 } 4100 }; 4101 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4102 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C); 4103 } 4104 4105 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 4106 QualType SrcType, QualType DestType, 4107 SourceLocation Loc) { 4108 assert(CGF.hasScalarEvaluationKind(DestType) && 4109 "DestType must have scalar evaluation kind."); 4110 assert(!Val.isAggregate() && "Must be a scalar or complex."); 4111 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 4112 DestType, Loc) 4113 : CGF.EmitComplexToScalarConversion( 4114 Val.getComplexVal(), SrcType, DestType, Loc); 4115 } 4116 4117 static CodeGenFunction::ComplexPairTy 4118 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 4119 QualType DestType, SourceLocation Loc) { 4120 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 4121 "DestType must have complex evaluation kind."); 4122 CodeGenFunction::ComplexPairTy ComplexVal; 4123 if (Val.isScalar()) { 4124 // Convert the input element to the element type of the complex. 
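// The imaginary part of the result is zero-initialized below.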
4125 QualType DestElementType = 4126 DestType->castAs<ComplexType>()->getElementType(); 4127 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 4128 Val.getScalarVal(), SrcType, DestElementType, Loc); 4129 ComplexVal = CodeGenFunction::ComplexPairTy( 4130 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 4131 } else { 4132 assert(Val.isComplex() && "Must be a scalar or complex."); 4133 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 4134 QualType DestElementType = 4135 DestType->castAs<ComplexType>()->getElementType(); 4136 ComplexVal.first = CGF.EmitScalarConversion( 4137 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 4138 ComplexVal.second = CGF.EmitScalarConversion( 4139 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 4140 } 4141 return ComplexVal; 4142 } 4143 4144 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4145 LValue LVal, RValue RVal) { 4146 if (LVal.isGlobalReg()) 4147 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 4148 else 4149 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 4150 } 4151 4152 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 4153 llvm::AtomicOrdering AO, LValue LVal, 4154 SourceLocation Loc) { 4155 if (LVal.isGlobalReg()) 4156 return CGF.EmitLoadOfLValue(LVal, Loc); 4157 return CGF.EmitAtomicLoad( 4158 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 4159 LVal.isVolatile()); 4160 } 4161 4162 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 4163 QualType RValTy, SourceLocation Loc) { 4164 switch (getEvaluationKind(LVal.getType())) { 4165 case TEK_Scalar: 4166 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 4167 *this, RVal, RValTy, LVal.getType(), Loc)), 4168 LVal); 4169 break; 4170 case TEK_Complex: 4171 EmitStoreOfComplex( 4172 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 4173 /*isInit=*/false); 4174 break; 4175 case TEK_Aggregate: 4176 llvm_unreachable("Must be a scalar or complex."); 4177 } 4178 } 4179 4180 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4181 const Expr *X, const Expr *V, 4182 SourceLocation Loc) { 4183 // v = x; 4184 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 4185 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 4186 LValue XLValue = CGF.EmitLValue(X); 4187 LValue VLValue = CGF.EmitLValue(V); 4188 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 4189 // OpenMP, 2.17.7, atomic Construct 4190 // If the read or capture clause is specified and the acquire, acq_rel, or 4191 // seq_cst clause is specified then the strong flush on exit from the atomic 4192 // operation is also an acquire flush. 
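// E.g. for '#pragma omp atomic read seq_cst' (AO == SequentiallyConsistent)
// the load above is followed by a flush with acquire ordering.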
4193 switch (AO) {
4194 case llvm::AtomicOrdering::Acquire:
4195 case llvm::AtomicOrdering::AcquireRelease:
4196 case llvm::AtomicOrdering::SequentiallyConsistent:
4197 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
4198 llvm::AtomicOrdering::Acquire);
4199 break;
4200 case llvm::AtomicOrdering::Monotonic:
4201 case llvm::AtomicOrdering::Release:
4202 break;
4203 case llvm::AtomicOrdering::NotAtomic:
4204 case llvm::AtomicOrdering::Unordered:
4205 llvm_unreachable("Unexpected ordering.");
4206 }
4207 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
4208 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
4209 }
4210
4211 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
4212 llvm::AtomicOrdering AO, const Expr *X,
4213 const Expr *E, SourceLocation Loc) {
4214 // x = expr;
4215 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
4216 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
4217 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
4218 // OpenMP, 2.17.7, atomic Construct
4219 // If the write, update, or capture clause is specified and the release,
4220 // acq_rel, or seq_cst clause is specified then the strong flush on entry to
4221 // the atomic operation is also a release flush.
4222 switch (AO) {
4223 case llvm::AtomicOrdering::Release:
4224 case llvm::AtomicOrdering::AcquireRelease:
4225 case llvm::AtomicOrdering::SequentiallyConsistent:
4226 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
4227 llvm::AtomicOrdering::Release);
4228 break;
4229 case llvm::AtomicOrdering::Acquire:
4230 case llvm::AtomicOrdering::Monotonic:
4231 break;
4232 case llvm::AtomicOrdering::NotAtomic:
4233 case llvm::AtomicOrdering::Unordered:
4234 llvm_unreachable("Unexpected ordering.");
4235 }
4236 }
4237
4238 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
4239 RValue Update,
4240 BinaryOperatorKind BO,
4241 llvm::AtomicOrdering AO,
4242 bool IsXLHSInRHSPart) {
4243 ASTContext &Context = CGF.getContext();
4244 // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
4245 // for the 'x' expression is simple, and atomics are allowed for the given
4246 // type on the target platform.
4247 if (BO == BO_Comma || !Update.isScalar() ||
4248 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
4249 (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
4250 (Update.getScalarVal()->getType() !=
4251 X.getAddress(CGF).getElementType())) ||
4252 !X.getAddress(CGF).getElementType()->isIntegerTy() ||
4253 !Context.getTargetInfo().hasBuiltinAtomic(
4254 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
4255 return std::make_pair(false, RValue::get(nullptr));
4256
4257 llvm::AtomicRMWInst::BinOp RMWOp;
4258 switch (BO) {
4259 case BO_Add:
4260 RMWOp = llvm::AtomicRMWInst::Add;
4261 break;
4262 case BO_Sub:
4263 if (!IsXLHSInRHSPart)
4264 return std::make_pair(false, RValue::get(nullptr));
4265 RMWOp = llvm::AtomicRMWInst::Sub;
4266 break;
4267 case BO_And:
4268 RMWOp = llvm::AtomicRMWInst::And;
4269 break;
4270 case BO_Or:
4271 RMWOp = llvm::AtomicRMWInst::Or;
4272 break;
4273 case BO_Xor:
4274 RMWOp = llvm::AtomicRMWInst::Xor;
4275 break;
4276 case BO_LT:
4277 RMWOp = X.getType()->hasSignedIntegerRepresentation()
4278 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
4279 : llvm::AtomicRMWInst::Max)
4280 : (IsXLHSInRHSPart ?
llvm::AtomicRMWInst::UMin 4281 : llvm::AtomicRMWInst::UMax); 4282 break; 4283 case BO_GT: 4284 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4285 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 4286 : llvm::AtomicRMWInst::Min) 4287 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax 4288 : llvm::AtomicRMWInst::UMin); 4289 break; 4290 case BO_Assign: 4291 RMWOp = llvm::AtomicRMWInst::Xchg; 4292 break; 4293 case BO_Mul: 4294 case BO_Div: 4295 case BO_Rem: 4296 case BO_Shl: 4297 case BO_Shr: 4298 case BO_LAnd: 4299 case BO_LOr: 4300 return std::make_pair(false, RValue::get(nullptr)); 4301 case BO_PtrMemD: 4302 case BO_PtrMemI: 4303 case BO_LE: 4304 case BO_GE: 4305 case BO_EQ: 4306 case BO_NE: 4307 case BO_Cmp: 4308 case BO_AddAssign: 4309 case BO_SubAssign: 4310 case BO_AndAssign: 4311 case BO_OrAssign: 4312 case BO_XorAssign: 4313 case BO_MulAssign: 4314 case BO_DivAssign: 4315 case BO_RemAssign: 4316 case BO_ShlAssign: 4317 case BO_ShrAssign: 4318 case BO_Comma: 4319 llvm_unreachable("Unsupported atomic update operation"); 4320 } 4321 llvm::Value *UpdateVal = Update.getScalarVal(); 4322 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 4323 UpdateVal = CGF.Builder.CreateIntCast( 4324 IC, X.getAddress(CGF).getElementType(), 4325 X.getType()->hasSignedIntegerRepresentation()); 4326 } 4327 llvm::Value *Res = 4328 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 4329 return std::make_pair(true, RValue::get(Res)); 4330 } 4331 4332 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 4333 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 4334 llvm::AtomicOrdering AO, SourceLocation Loc, 4335 const llvm::function_ref<RValue(RValue)> CommonGen) { 4336 // Update expressions are allowed to have the following forms: 4337 // x binop= expr; -> xrval binop expr; 4338 // x++, ++x -> xrval + 1; 4339 // x--, --x -> xrval - 1; 4340 // x = x binop expr; -> xrval binop expr; 4341 // x = expr Op x; -> expr binop xrval; 4342 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 4343 if (!Res.first) { 4344 if (X.isGlobalReg()) { 4345 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 4346 // 'xrval'. 4347 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 4348 } else { 4349 // Perform compare-and-swap procedure. 4350 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 4351 } 4352 } 4353 return Res; 4354 } 4355 4356 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, 4357 llvm::AtomicOrdering AO, const Expr *X, 4358 const Expr *E, const Expr *UE, 4359 bool IsXLHSInRHSPart, SourceLocation Loc) { 4360 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4361 "Update expr in 'atomic update' must be a binary operator."); 4362 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4363 // Update expressions are allowed to have the following forms: 4364 // x binop= expr; -> xrval binop expr; 4365 // x++, ++x -> xrval + 1; 4366 // x--, --x -> xrval - 1; 4367 // x = x binop expr; -> xrval binop expr; 4368 // x = expr Op x; -> expr binop xrval; 4369 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 4370 LValue XLValue = CGF.EmitLValue(X); 4371 RValue ExprRValue = CGF.EmitAnyExpr(E); 4372 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4373 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4374 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4375 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? 
RHS : LHS; 4376 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 4377 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4378 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4379 return CGF.EmitAnyExpr(UE); 4380 }; 4381 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 4382 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4383 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4384 // OpenMP, 2.17.7, atomic Construct 4385 // If the write, update, or capture clause is specified and the release, 4386 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4387 // the atomic operation is also a release flush. 4388 switch (AO) { 4389 case llvm::AtomicOrdering::Release: 4390 case llvm::AtomicOrdering::AcquireRelease: 4391 case llvm::AtomicOrdering::SequentiallyConsistent: 4392 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4393 llvm::AtomicOrdering::Release); 4394 break; 4395 case llvm::AtomicOrdering::Acquire: 4396 case llvm::AtomicOrdering::Monotonic: 4397 break; 4398 case llvm::AtomicOrdering::NotAtomic: 4399 case llvm::AtomicOrdering::Unordered: 4400 llvm_unreachable("Unexpected ordering."); 4401 } 4402 } 4403 4404 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 4405 QualType SourceType, QualType ResType, 4406 SourceLocation Loc) { 4407 switch (CGF.getEvaluationKind(ResType)) { 4408 case TEK_Scalar: 4409 return RValue::get( 4410 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 4411 case TEK_Complex: { 4412 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 4413 return RValue::getComplex(Res.first, Res.second); 4414 } 4415 case TEK_Aggregate: 4416 break; 4417 } 4418 llvm_unreachable("Must be a scalar or complex."); 4419 } 4420 4421 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, 4422 llvm::AtomicOrdering AO, 4423 bool IsPostfixUpdate, const Expr *V, 4424 const Expr *X, const Expr *E, 4425 const Expr *UE, bool IsXLHSInRHSPart, 4426 SourceLocation Loc) { 4427 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 4428 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 4429 RValue NewVVal; 4430 LValue VLValue = CGF.EmitLValue(V); 4431 LValue XLValue = CGF.EmitLValue(X); 4432 RValue ExprRValue = CGF.EmitAnyExpr(E); 4433 QualType NewVValType; 4434 if (UE) { 4435 // 'x' is updated with some additional value. 4436 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4437 "Update expr in 'atomic capture' must be a binary operator."); 4438 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4439 // Update expressions are allowed to have the following forms: 4440 // x binop= expr; -> xrval binop expr; 4441 // x++, ++x -> xrval + 1; 4442 // x--, --x -> xrval - 1; 4443 // x = x binop expr; -> xrval binop expr; 4444 // x = expr Op x; -> expr binop xrval; 4445 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4446 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4447 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4448 NewVValType = XRValExpr->getType(); 4449 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? 
RHS : LHS; 4450 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 4451 IsPostfixUpdate](RValue XRValue) { 4452 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4453 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4454 RValue Res = CGF.EmitAnyExpr(UE); 4455 NewVVal = IsPostfixUpdate ? XRValue : Res; 4456 return Res; 4457 }; 4458 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 4459 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4460 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4461 if (Res.first) { 4462 // 'atomicrmw' instruction was generated. 4463 if (IsPostfixUpdate) { 4464 // Use old value from 'atomicrmw'. 4465 NewVVal = Res.second; 4466 } else { 4467 // 'atomicrmw' does not provide new value, so evaluate it using old 4468 // value of 'x'. 4469 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4470 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 4471 NewVVal = CGF.EmitAnyExpr(UE); 4472 } 4473 } 4474 } else { 4475 // 'x' is simply rewritten with some 'expr'. 4476 NewVValType = X->getType().getNonReferenceType(); 4477 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 4478 X->getType().getNonReferenceType(), Loc); 4479 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 4480 NewVVal = XRValue; 4481 return ExprRValue; 4482 }; 4483 // Try to perform atomicrmw xchg, otherwise simple exchange. 4484 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 4485 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 4486 Loc, Gen); 4487 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4488 if (Res.first) { 4489 // 'atomicrmw' instruction was generated. 4490 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 4491 } 4492 } 4493 // Emit post-update store to 'v' of old/new 'x' value. 4494 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 4495 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4496 // OpenMP, 2.17.7, atomic Construct 4497 // If the write, update, or capture clause is specified and the release, 4498 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4499 // the atomic operation is also a release flush. 4500 // If the read or capture clause is specified and the acquire, acq_rel, or 4501 // seq_cst clause is specified then the strong flush on exit from the atomic 4502 // operation is also an acquire flush. 
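// Hedged example of how the two flush rules combine (assumed source, not
// from a test):
//   #pragma omp atomic capture seq_cst
//   { v = x; x += e; }
// seq_cst triggers both the release-on-entry and acquire-on-exit rules, so
// the switch below emits a single acquire-release flush instead of two.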
4503 switch (AO) { 4504 case llvm::AtomicOrdering::Release: 4505 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4506 llvm::AtomicOrdering::Release); 4507 break; 4508 case llvm::AtomicOrdering::Acquire: 4509 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4510 llvm::AtomicOrdering::Acquire); 4511 break; 4512 case llvm::AtomicOrdering::AcquireRelease: 4513 case llvm::AtomicOrdering::SequentiallyConsistent: 4514 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4515 llvm::AtomicOrdering::AcquireRelease); 4516 break; 4517 case llvm::AtomicOrdering::Monotonic: 4518 break; 4519 case llvm::AtomicOrdering::NotAtomic: 4520 case llvm::AtomicOrdering::Unordered: 4521 llvm_unreachable("Unexpected ordering."); 4522 } 4523 } 4524 4525 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 4526 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 4527 const Expr *X, const Expr *V, const Expr *E, 4528 const Expr *UE, bool IsXLHSInRHSPart, 4529 SourceLocation Loc) { 4530 switch (Kind) { 4531 case OMPC_read: 4532 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 4533 break; 4534 case OMPC_write: 4535 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 4536 break; 4537 case OMPC_unknown: 4538 case OMPC_update: 4539 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 4540 break; 4541 case OMPC_capture: 4542 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 4543 IsXLHSInRHSPart, Loc); 4544 break; 4545 case OMPC_if: 4546 case OMPC_final: 4547 case OMPC_num_threads: 4548 case OMPC_private: 4549 case OMPC_firstprivate: 4550 case OMPC_lastprivate: 4551 case OMPC_reduction: 4552 case OMPC_task_reduction: 4553 case OMPC_in_reduction: 4554 case OMPC_safelen: 4555 case OMPC_simdlen: 4556 case OMPC_allocator: 4557 case OMPC_allocate: 4558 case OMPC_collapse: 4559 case OMPC_default: 4560 case OMPC_seq_cst: 4561 case OMPC_acq_rel: 4562 case OMPC_acquire: 4563 case OMPC_release: 4564 case OMPC_relaxed: 4565 case OMPC_shared: 4566 case OMPC_linear: 4567 case OMPC_aligned: 4568 case OMPC_copyin: 4569 case OMPC_copyprivate: 4570 case OMPC_flush: 4571 case OMPC_depobj: 4572 case OMPC_proc_bind: 4573 case OMPC_schedule: 4574 case OMPC_ordered: 4575 case OMPC_nowait: 4576 case OMPC_untied: 4577 case OMPC_threadprivate: 4578 case OMPC_depend: 4579 case OMPC_mergeable: 4580 case OMPC_device: 4581 case OMPC_threads: 4582 case OMPC_simd: 4583 case OMPC_map: 4584 case OMPC_num_teams: 4585 case OMPC_thread_limit: 4586 case OMPC_priority: 4587 case OMPC_grainsize: 4588 case OMPC_nogroup: 4589 case OMPC_num_tasks: 4590 case OMPC_hint: 4591 case OMPC_dist_schedule: 4592 case OMPC_defaultmap: 4593 case OMPC_uniform: 4594 case OMPC_to: 4595 case OMPC_from: 4596 case OMPC_use_device_ptr: 4597 case OMPC_is_device_ptr: 4598 case OMPC_unified_address: 4599 case OMPC_unified_shared_memory: 4600 case OMPC_reverse_offload: 4601 case OMPC_dynamic_allocators: 4602 case OMPC_atomic_default_mem_order: 4603 case OMPC_device_type: 4604 case OMPC_match: 4605 case OMPC_nontemporal: 4606 case OMPC_order: 4607 case OMPC_destroy: 4608 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 4609 } 4610 } 4611 4612 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 4613 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic; 4614 bool MemOrderingSpecified = false; 4615 if (S.getSingleClause<OMPSeqCstClause>()) { 4616 AO = llvm::AtomicOrdering::SequentiallyConsistent; 4617 MemOrderingSpecified = true; 4618 } else if (S.getSingleClause<OMPAcqRelClause>()) { 4619 AO = 
llvm::AtomicOrdering::AcquireRelease; 4620 MemOrderingSpecified = true; 4621 } else if (S.getSingleClause<OMPAcquireClause>()) { 4622 AO = llvm::AtomicOrdering::Acquire; 4623 MemOrderingSpecified = true; 4624 } else if (S.getSingleClause<OMPReleaseClause>()) { 4625 AO = llvm::AtomicOrdering::Release; 4626 MemOrderingSpecified = true; 4627 } else if (S.getSingleClause<OMPRelaxedClause>()) { 4628 AO = llvm::AtomicOrdering::Monotonic; 4629 MemOrderingSpecified = true; 4630 } 4631 OpenMPClauseKind Kind = OMPC_unknown; 4632 for (const OMPClause *C : S.clauses()) { 4633 // Find the first clause (skip the seq_cst|acq_rel|acquire|release|relaxed 4634 // clause, if it is first). 4635 if (C->getClauseKind() != OMPC_seq_cst && 4636 C->getClauseKind() != OMPC_acq_rel && 4637 C->getClauseKind() != OMPC_acquire && 4638 C->getClauseKind() != OMPC_release && 4639 C->getClauseKind() != OMPC_relaxed) { 4640 Kind = C->getClauseKind(); 4641 break; 4642 } 4643 } 4644 if (!MemOrderingSpecified) { 4645 llvm::AtomicOrdering DefaultOrder = 4646 CGM.getOpenMPRuntime().getDefaultMemoryOrdering(); 4647 if (DefaultOrder == llvm::AtomicOrdering::Monotonic || 4648 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent || 4649 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease && 4650 Kind == OMPC_capture)) { 4651 AO = DefaultOrder; 4652 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) { 4653 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) { 4654 AO = llvm::AtomicOrdering::Release; 4655 } else if (Kind == OMPC_read) { 4656 assert(Kind == OMPC_read && "Unexpected atomic kind."); 4657 AO = llvm::AtomicOrdering::Acquire; 4658 } 4659 } 4660 } 4661 4662 const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers(); 4663 if (const auto *FE = dyn_cast<FullExpr>(CS)) 4664 enterFullExpression(FE); 4665 // Processing for statements under 'atomic capture'. 4666 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) { 4667 for (const Stmt *C : Compound->body()) { 4668 if (const auto *FE = dyn_cast<FullExpr>(C)) 4669 enterFullExpression(FE); 4670 } 4671 } 4672 4673 auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF, 4674 PrePostActionTy &) { 4675 CGF.EmitStopPoint(CS); 4676 emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(), 4677 S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(), 4678 S.getBeginLoc()); 4679 }; 4680 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4681 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 4682 } 4683 4684 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 4685 const OMPExecutableDirective &S, 4686 const RegionCodeGenTy &CodeGen) { 4687 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 4688 CodeGenModule &CGM = CGF.CGM; 4689 4690 // On the device, emit this construct as inlined code. 4691 if (CGM.getLangOpts().OpenMPIsDevice) { 4692 OMPLexicalScope Scope(CGF, S, OMPD_target); 4693 CGM.getOpenMPRuntime().emitInlinedDirective( 4694 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4695 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4696 }); 4697 return; 4698 } 4699 4700 auto LPCRegion = 4701 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S); 4702 llvm::Function *Fn = nullptr; 4703 llvm::Constant *FnID = nullptr; 4704 4705 const Expr *IfCond = nullptr; 4706 // Check for the 'if' clause associated with the target region; at most one applies. 
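// For illustration only (assumed user code):
//   #pragma omp target if(target: use_gpu) device(dev_id)
// The loop below picks the condition whose directive-name modifier is
// 'target' (or is absent), and the device-clause check that follows picks
// up 'dev_id'.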
4707 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4708 if (C->getNameModifier() == OMPD_unknown || 4709 C->getNameModifier() == OMPD_target) { 4710 IfCond = C->getCondition(); 4711 break; 4712 } 4713 } 4714 4715 // Check if we have any device clause associated with the directive. 4716 const Expr *Device = nullptr; 4717 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 4718 Device = C->getDevice(); 4719 4720 // Check if we have an if clause whose conditional always evaluates to false 4721 // or if we do not have any targets specified. If so, the target region is not 4722 // an offload entry point. 4723 bool IsOffloadEntry = true; 4724 if (IfCond) { 4725 bool Val; 4726 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 4727 IsOffloadEntry = false; 4728 } 4729 if (CGM.getLangOpts().OMPTargetTriples.empty()) 4730 IsOffloadEntry = false; 4731 4732 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 4733 StringRef ParentName; 4734 // If we have constructors/destructors, use the complete-object variant to 4735 // produce the mangled name of the device-outlined kernel. 4736 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 4737 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 4738 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 4739 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 4740 else 4741 ParentName = 4742 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 4743 4744 // Emit target region as a standalone region. 4745 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 4746 IsOffloadEntry, CodeGen); 4747 OMPLexicalScope Scope(CGF, S, OMPD_task); 4748 auto &&SizeEmitter = 4749 [IsOffloadEntry](CodeGenFunction &CGF, 4750 const OMPLoopDirective &D) -> llvm::Value * { 4751 if (IsOffloadEntry) { 4752 OMPLoopScope LoopScope(CGF, D); 4753 // Emit calculation of the iterations count. 4754 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 4755 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 4756 /*isSigned=*/false); 4757 return NumIterations; 4758 } 4759 return nullptr; 4760 }; 4761 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 4762 SizeEmitter); 4763 } 4764 4765 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 4766 PrePostActionTy &Action) { 4767 Action.Enter(CGF); 4768 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4769 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4770 CGF.EmitOMPPrivateClause(S, PrivateScope); 4771 (void)PrivateScope.Privatize(); 4772 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4773 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 4774 4775 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 4776 } 4777 4778 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 4779 StringRef ParentName, 4780 const OMPTargetDirective &S) { 4781 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4782 emitTargetRegion(CGF, S, Action); 4783 }; 4784 llvm::Function *Fn; 4785 llvm::Constant *Addr; 4786 // Emit target region as a standalone region. 
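// Sketch of the expected outcome, assuming the usual offload-entry naming
// scheme (an assumption, not something this function guarantees): a target
// region at line 12 of foo() is outlined as something like
//   __omp_offloading_<device-id>_<file-id>_foo_l12
// where ParentName supplies the mangled 'foo' component.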
4787 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4788 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4789 assert(Fn && Addr && "Target device function emission failed."); 4790 } 4791 4792 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 4793 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4794 emitTargetRegion(CGF, S, Action); 4795 }; 4796 emitCommonOMPTargetDirective(*this, S, CodeGen); 4797 } 4798 4799 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 4800 const OMPExecutableDirective &S, 4801 OpenMPDirectiveKind InnermostKind, 4802 const RegionCodeGenTy &CodeGen) { 4803 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 4804 llvm::Function *OutlinedFn = 4805 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 4806 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 4807 4808 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 4809 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 4810 if (NT || TL) { 4811 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 4812 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 4813 4814 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 4815 S.getBeginLoc()); 4816 } 4817 4818 OMPTeamsScope Scope(CGF, S); 4819 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4820 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4821 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 4822 CapturedVars); 4823 } 4824 4825 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 4826 // Emit teams region as a standalone region. 4827 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4828 Action.Enter(CGF); 4829 OMPPrivateScope PrivateScope(CGF); 4830 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4831 CGF.EmitOMPPrivateClause(S, PrivateScope); 4832 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4833 (void)PrivateScope.Privatize(); 4834 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 4835 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4836 }; 4837 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 4838 emitPostUpdateForReductionClause(*this, S, 4839 [](CodeGenFunction &) { return nullptr; }); 4840 } 4841 4842 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 4843 const OMPTargetTeamsDirective &S) { 4844 auto *CS = S.getCapturedStmt(OMPD_teams); 4845 Action.Enter(CGF); 4846 // Emit teams region as a standalone region. 
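// Assumed example for exposition:
//   #pragma omp target teams firstprivate(a) reduction(+:sum)
// The lambda below privatizes 'a' and the reduction copy of 'sum' before
// emitting the captured body, then finalizes the reduction with the
// teams-specific runtime logic.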
4847 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 4848 Action.Enter(CGF); 4849 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4850 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4851 CGF.EmitOMPPrivateClause(S, PrivateScope); 4852 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4853 (void)PrivateScope.Privatize(); 4854 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4855 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 4856 CGF.EmitStmt(CS->getCapturedStmt()); 4857 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4858 }; 4859 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 4860 emitPostUpdateForReductionClause(CGF, S, 4861 [](CodeGenFunction &) { return nullptr; }); 4862 } 4863 4864 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 4865 CodeGenModule &CGM, StringRef ParentName, 4866 const OMPTargetTeamsDirective &S) { 4867 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4868 emitTargetTeamsRegion(CGF, Action, S); 4869 }; 4870 llvm::Function *Fn; 4871 llvm::Constant *Addr; 4872 // Emit target region as a standalone region. 4873 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4874 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4875 assert(Fn && Addr && "Target device function emission failed."); 4876 } 4877 4878 void CodeGenFunction::EmitOMPTargetTeamsDirective( 4879 const OMPTargetTeamsDirective &S) { 4880 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4881 emitTargetTeamsRegion(CGF, Action, S); 4882 }; 4883 emitCommonOMPTargetDirective(*this, S, CodeGen); 4884 } 4885 4886 static void 4887 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 4888 const OMPTargetTeamsDistributeDirective &S) { 4889 Action.Enter(CGF); 4890 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4891 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4892 }; 4893 4894 // Emit teams region as a standalone region. 4895 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4896 PrePostActionTy &Action) { 4897 Action.Enter(CGF); 4898 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4899 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4900 (void)PrivateScope.Privatize(); 4901 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4902 CodeGenDistribute); 4903 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4904 }; 4905 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 4906 emitPostUpdateForReductionClause(CGF, S, 4907 [](CodeGenFunction &) { return nullptr; }); 4908 } 4909 4910 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 4911 CodeGenModule &CGM, StringRef ParentName, 4912 const OMPTargetTeamsDistributeDirective &S) { 4913 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4914 emitTargetTeamsDistributeRegion(CGF, Action, S); 4915 }; 4916 llvm::Function *Fn; 4917 llvm::Constant *Addr; 4918 // Emit target region as a standalone region. 
4919 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4920 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4921 assert(Fn && Addr && "Target device function emission failed."); 4922 } 4923 4924 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 4925 const OMPTargetTeamsDistributeDirective &S) { 4926 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4927 emitTargetTeamsDistributeRegion(CGF, Action, S); 4928 }; 4929 emitCommonOMPTargetDirective(*this, S, CodeGen); 4930 } 4931 4932 static void emitTargetTeamsDistributeSimdRegion( 4933 CodeGenFunction &CGF, PrePostActionTy &Action, 4934 const OMPTargetTeamsDistributeSimdDirective &S) { 4935 Action.Enter(CGF); 4936 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4937 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4938 }; 4939 4940 // Emit teams region as a standalone region. 4941 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4942 PrePostActionTy &Action) { 4943 Action.Enter(CGF); 4944 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4945 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4946 (void)PrivateScope.Privatize(); 4947 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4948 CodeGenDistribute); 4949 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4950 }; 4951 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 4952 emitPostUpdateForReductionClause(CGF, S, 4953 [](CodeGenFunction &) { return nullptr; }); 4954 } 4955 4956 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 4957 CodeGenModule &CGM, StringRef ParentName, 4958 const OMPTargetTeamsDistributeSimdDirective &S) { 4959 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4960 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 4961 }; 4962 llvm::Function *Fn; 4963 llvm::Constant *Addr; 4964 // Emit target region as a standalone region. 4965 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4966 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4967 assert(Fn && Addr && "Target device function emission failed."); 4968 } 4969 4970 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 4971 const OMPTargetTeamsDistributeSimdDirective &S) { 4972 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4973 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 4974 }; 4975 emitCommonOMPTargetDirective(*this, S, CodeGen); 4976 } 4977 4978 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 4979 const OMPTeamsDistributeDirective &S) { 4980 4981 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4982 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4983 }; 4984 4985 // Emit teams region as a standalone region. 
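// Assumed source for illustration:
//   #pragma omp teams distribute
//   for (int i = 0; i < n; ++i) ...
// The 'distribute' loop is emitted inline within the teams region created
// below; its iterations are divided among the teams of the league and run
// on each team's initial thread.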
4986 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 4987 PrePostActionTy &Action) { 4988 Action.Enter(CGF); 4989 OMPPrivateScope PrivateScope(CGF); 4990 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4991 (void)PrivateScope.Privatize(); 4992 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 4993 CodeGenDistribute); 4994 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 4995 }; 4996 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 4997 emitPostUpdateForReductionClause(*this, S, 4998 [](CodeGenFunction &) { return nullptr; }); 4999 } 5000 5001 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 5002 const OMPTeamsDistributeSimdDirective &S) { 5003 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5004 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5005 }; 5006 5007 // Emit teams region as a standalone region. 5008 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5009 PrePostActionTy &Action) { 5010 Action.Enter(CGF); 5011 OMPPrivateScope PrivateScope(CGF); 5012 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5013 (void)PrivateScope.Privatize(); 5014 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 5015 CodeGenDistribute); 5016 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5017 }; 5018 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 5019 emitPostUpdateForReductionClause(*this, S, 5020 [](CodeGenFunction &) { return nullptr; }); 5021 } 5022 5023 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 5024 const OMPTeamsDistributeParallelForDirective &S) { 5025 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5026 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5027 S.getDistInc()); 5028 }; 5029 5030 // Emit teams region as a standalone region. 5031 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5032 PrePostActionTy &Action) { 5033 Action.Enter(CGF); 5034 OMPPrivateScope PrivateScope(CGF); 5035 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5036 (void)PrivateScope.Privatize(); 5037 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5038 CodeGenDistribute); 5039 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5040 }; 5041 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 5042 emitPostUpdateForReductionClause(*this, S, 5043 [](CodeGenFunction &) { return nullptr; }); 5044 } 5045 5046 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 5047 const OMPTeamsDistributeParallelForSimdDirective &S) { 5048 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5049 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5050 S.getDistInc()); 5051 }; 5052 5053 // Emit teams region as a standalone region. 
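// Hedged sketch of the decomposition (assumed user code):
//   #pragma omp teams distribute parallel for simd
//   for (int i = 0; i < n; ++i) ...
// The teams region below inlines a 'distribute' loop, and each distribute
// chunk runs the inner 'parallel for' through
// emitInnerParallelForWhenCombined above.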
5054 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5055 PrePostActionTy &Action) { 5056 Action.Enter(CGF); 5057 OMPPrivateScope PrivateScope(CGF); 5058 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5059 (void)PrivateScope.Privatize(); 5060 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5061 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5062 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5063 }; 5064 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 5065 CodeGen); 5066 emitPostUpdateForReductionClause(*this, S, 5067 [](CodeGenFunction &) { return nullptr; }); 5068 } 5069 5070 static void emitTargetTeamsDistributeParallelForRegion( 5071 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 5072 PrePostActionTy &Action) { 5073 Action.Enter(CGF); 5074 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5075 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5076 S.getDistInc()); 5077 }; 5078 5079 // Emit teams region as a standalone region. 5080 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5081 PrePostActionTy &Action) { 5082 Action.Enter(CGF); 5083 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5084 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5085 (void)PrivateScope.Privatize(); 5086 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5087 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5088 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5089 }; 5090 5091 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 5092 CodeGenTeams); 5093 emitPostUpdateForReductionClause(CGF, S, 5094 [](CodeGenFunction &) { return nullptr; }); 5095 } 5096 5097 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 5098 CodeGenModule &CGM, StringRef ParentName, 5099 const OMPTargetTeamsDistributeParallelForDirective &S) { 5100 // Emit SPMD target teams distribute parallel for region as a standalone 5101 // region. 5102 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5103 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5104 }; 5105 llvm::Function *Fn; 5106 llvm::Constant *Addr; 5107 // Emit target region as a standalone region. 5108 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5109 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5110 assert(Fn && Addr && "Target device function emission failed."); 5111 } 5112 5113 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 5114 const OMPTargetTeamsDistributeParallelForDirective &S) { 5115 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5116 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5117 }; 5118 emitCommonOMPTargetDirective(*this, S, CodeGen); 5119 } 5120 5121 static void emitTargetTeamsDistributeParallelForSimdRegion( 5122 CodeGenFunction &CGF, 5123 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 5124 PrePostActionTy &Action) { 5125 Action.Enter(CGF); 5126 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5127 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5128 S.getDistInc()); 5129 }; 5130 5131 // Emit teams region as a standalone region. 
5132 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5133 PrePostActionTy &Action) { 5134 Action.Enter(CGF); 5135 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5136 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5137 (void)PrivateScope.Privatize(); 5138 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5139 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5140 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5141 }; 5142 5143 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 5144 CodeGenTeams); 5145 emitPostUpdateForReductionClause(CGF, S, 5146 [](CodeGenFunction &) { return nullptr; }); 5147 } 5148 5149 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 5150 CodeGenModule &CGM, StringRef ParentName, 5151 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5152 // Emit SPMD target teams distribute parallel for simd region as a standalone 5153 // region. 5154 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5155 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5156 }; 5157 llvm::Function *Fn; 5158 llvm::Constant *Addr; 5159 // Emit target region as a standalone region. 5160 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5161 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5162 assert(Fn && Addr && "Target device function emission failed."); 5163 } 5164 5165 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 5166 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5167 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5168 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5169 }; 5170 emitCommonOMPTargetDirective(*this, S, CodeGen); 5171 } 5172 5173 void CodeGenFunction::EmitOMPCancellationPointDirective( 5174 const OMPCancellationPointDirective &S) { 5175 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 5176 S.getCancelRegion()); 5177 } 5178 5179 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 5180 const Expr *IfCond = nullptr; 5181 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5182 if (C->getNameModifier() == OMPD_unknown || 5183 C->getNameModifier() == OMPD_cancel) { 5184 IfCond = C->getCondition(); 5185 break; 5186 } 5187 } 5188 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5189 // TODO: This check is necessary as we only generate `omp parallel` through 5190 // the OpenMPIRBuilder for now. 
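// Example of a construct that takes this path (illustrative only):
//   #pragma omp cancel parallel if(err != 0)
// The region kind is OMPD_parallel, so cancellation is emitted through
// OMPBuilder->CreateCancel below rather than through the runtime call at
// the end of this function.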
5191 if (S.getCancelRegion() == OMPD_parallel) { 5192 llvm::Value *IfCondition = nullptr; 5193 if (IfCond) 5194 IfCondition = EmitScalarExpr(IfCond, 5195 /*IgnoreResultAssign=*/true); 5196 return Builder.restoreIP( 5197 OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion())); 5198 } 5199 } 5200 5201 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond, 5202 S.getCancelRegion()); 5203 } 5204 5205 CodeGenFunction::JumpDest 5206 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 5207 if (Kind == OMPD_parallel || Kind == OMPD_task || 5208 Kind == OMPD_target_parallel || Kind == OMPD_taskloop || 5209 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop) 5210 return ReturnBlock; 5211 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections || 5212 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for || 5213 Kind == OMPD_distribute_parallel_for || 5214 Kind == OMPD_target_parallel_for || 5215 Kind == OMPD_teams_distribute_parallel_for || 5216 Kind == OMPD_target_teams_distribute_parallel_for); 5217 return OMPCancelStack.getExitBlock(); 5218 } 5219 5220 void CodeGenFunction::EmitOMPUseDevicePtrClause( 5221 const OMPClause &NC, OMPPrivateScope &PrivateScope, 5222 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 5223 const auto &C = cast<OMPUseDevicePtrClause>(NC); 5224 auto OrigVarIt = C.varlist_begin(); 5225 auto InitIt = C.inits().begin(); 5226 for (const Expr *PvtVarIt : C.private_copies()) { 5227 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl()); 5228 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl()); 5229 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl()); 5230 5231 // In order to identify the right initializer we need to match the 5232 // declaration used by the mapping logic. In some cases we may get 5233 // an OMPCapturedExprDecl that refers to the original declaration. 5234 const ValueDecl *MatchingVD = OrigVD; 5235 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 5236 // OMPCapturedExprDecls are used to privatize fields of the current 5237 // structure. 5238 const auto *ME = cast<MemberExpr>(OED->getInit()); 5239 assert(isa<CXXThisExpr>(ME->getBase()) && 5240 "Base should be the current struct!"); 5241 MatchingVD = ME->getMemberDecl(); 5242 } 5243 5244 // If we don't have information about the current list item, move on to 5245 // the next one. 5246 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 5247 if (InitAddrIt == CaptureDeviceAddrMap.end()) 5248 continue; 5249 5250 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD, 5251 InitAddrIt, InitVD, 5252 PvtVD]() { 5253 // Initialize the temporary initialization variable with the address we 5254 // get from the runtime library. We have to cast the source address 5255 // because it is always a void *. References are materialized in the 5256 // privatization scope, so the initialization here disregards the fact 5257 // that the original variable is a reference. 5258 QualType AddrQTy = 5259 getContext().getPointerType(OrigVD->getType().getNonReferenceType()); 5260 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy); 5261 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy); 5262 setAddrOfLocalVar(InitVD, InitAddr); 5263 5264 // Emit the private declaration; it will be initialized by the 5265 // declaration we just added to the local declarations map. 
5266 EmitDecl(*PvtVD); 5267 5268 // The initialization variable has served its purpose in the emission 5269 // of the previous declaration, so we don't need it anymore. 5270 LocalDeclMap.erase(InitVD); 5271 5272 // Return the address of the private variable. 5273 return GetAddrOfLocalVar(PvtVD); 5274 }); 5275 assert(IsRegistered && "use_device_ptr var already registered as private"); 5276 // Silence the warning about unused variable. 5277 (void)IsRegistered; 5278 5279 ++OrigVarIt; 5280 ++InitIt; 5281 } 5282 } 5283 5284 // Generate the instructions for '#pragma omp target data' directive. 5285 void CodeGenFunction::EmitOMPTargetDataDirective( 5286 const OMPTargetDataDirective &S) { 5287 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true); 5288 5289 // Create a pre/post action to signal the privatization of the device pointers. 5290 // This action can be replaced by the OpenMP runtime code generation to 5291 // deactivate privatization. 5292 bool PrivatizeDevicePointers = false; 5293 class DevicePointerPrivActionTy : public PrePostActionTy { 5294 bool &PrivatizeDevicePointers; 5295 5296 public: 5297 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers) 5298 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {} 5299 void Enter(CodeGenFunction &CGF) override { 5300 PrivatizeDevicePointers = true; 5301 } 5302 }; 5303 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers); 5304 5305 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers]( 5306 CodeGenFunction &CGF, PrePostActionTy &Action) { 5307 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5308 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 5309 }; 5310 5311 // Codegen that selects whether to generate the privatization code or not. 5312 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers, 5313 &InnermostCodeGen](CodeGenFunction &CGF, 5314 PrePostActionTy &Action) { 5315 RegionCodeGenTy RCG(InnermostCodeGen); 5316 PrivatizeDevicePointers = false; 5317 5318 // Call the pre-action to change the status of PrivatizeDevicePointers if 5319 // needed. 5320 Action.Enter(CGF); 5321 5322 if (PrivatizeDevicePointers) { 5323 OMPPrivateScope PrivateScope(CGF); 5324 // Emit all instances of the use_device_ptr clause. 5325 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>()) 5326 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope, 5327 Info.CaptureDeviceAddrMap); 5328 (void)PrivateScope.Privatize(); 5329 RCG(CGF); 5330 } else { 5331 RCG(CGF); 5332 } 5333 }; 5334 5335 // Forward the provided action to the privatization codegen. 5336 RegionCodeGenTy PrivRCG(PrivCodeGen); 5337 PrivRCG.setAction(Action); 5338 5339 // Although the body of the region is emitted as an inlined directive, 5340 // we don't use an inline scope: changes to the references inside the 5341 // region are expected to be visible outside, so we do not privatize them. 5342 OMPLexicalScope Scope(CGF, S); 5343 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data, 5344 PrivRCG); 5345 }; 5346 5347 RegionCodeGenTy RCG(CodeGen); 5348 5349 // If we don't have target devices, don't bother emitting the data mapping 5350 // code. 5351 if (CGM.getLangOpts().OMPTargetTriples.empty()) { 5352 RCG(*this); 5353 return; 5354 } 5355 5356 // Check if we have any if clause associated with the directive. 
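// Illustrative usage handled by this lowering (assumed source):
//   #pragma omp target data map(tofrom: buf[0:n]) use_device_ptr(buf) if(n)
// The 'if' and 'device' expressions collected below are forwarded to
// emitTargetDataCalls together with the privatization action prepared
// above.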
5357 const Expr *IfCond = nullptr; 5358 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5359 IfCond = C->getCondition(); 5360 5361 // Check if we have any device clause associated with the directive. 5362 const Expr *Device = nullptr; 5363 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5364 Device = C->getDevice(); 5365 5366 // Set the action to signal privatization of device pointers. 5367 RCG.setAction(PrivAction); 5368 5369 // Emit region code. 5370 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 5371 Info); 5372 } 5373 5374 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 5375 const OMPTargetEnterDataDirective &S) { 5376 // If we don't have target devices, don't bother emitting the data mapping 5377 // code. 5378 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5379 return; 5380 5381 // Check if we have any if clause associated with the directive. 5382 const Expr *IfCond = nullptr; 5383 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5384 IfCond = C->getCondition(); 5385 5386 // Check if we have any device clause associated with the directive. 5387 const Expr *Device = nullptr; 5388 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5389 Device = C->getDevice(); 5390 5391 OMPLexicalScope Scope(*this, S, OMPD_task); 5392 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5393 } 5394 5395 void CodeGenFunction::EmitOMPTargetExitDataDirective( 5396 const OMPTargetExitDataDirective &S) { 5397 // If we don't have target devices, don't bother emitting the data mapping 5398 // code. 5399 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5400 return; 5401 5402 // Check if we have any if clause associated with the directive. 5403 const Expr *IfCond = nullptr; 5404 if (const auto *C = S.getSingleClause<OMPIfClause>()) 5405 IfCond = C->getCondition(); 5406 5407 // Check if we have any device clause associated with the directive. 5408 const Expr *Device = nullptr; 5409 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 5410 Device = C->getDevice(); 5411 5412 OMPLexicalScope Scope(*this, S, OMPD_task); 5413 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 5414 } 5415 5416 static void emitTargetParallelRegion(CodeGenFunction &CGF, 5417 const OMPTargetParallelDirective &S, 5418 PrePostActionTy &Action) { 5419 // Get the captured statement associated with the 'parallel' region. 5420 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 5421 Action.Enter(CGF); 5422 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5423 Action.Enter(CGF); 5424 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5425 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5426 CGF.EmitOMPPrivateClause(S, PrivateScope); 5427 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5428 (void)PrivateScope.Privatize(); 5429 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5430 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5431 // TODO: Add support for clauses. 
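// For exposition (an assumed example):
//   #pragma omp target parallel reduction(+:acc)
// The statement emitted next is the parallel region body; the reduction
// epilogue right after it combines the per-thread copies of 'acc'.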
5432 CGF.EmitStmt(CS->getCapturedStmt()); 5433 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 5434 }; 5435 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 5436 emitEmptyBoundParameters); 5437 emitPostUpdateForReductionClause(CGF, S, 5438 [](CodeGenFunction &) { return nullptr; }); 5439 } 5440 5441 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 5442 CodeGenModule &CGM, StringRef ParentName, 5443 const OMPTargetParallelDirective &S) { 5444 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5445 emitTargetParallelRegion(CGF, S, Action); 5446 }; 5447 llvm::Function *Fn; 5448 llvm::Constant *Addr; 5449 // Emit target region as a standalone region. 5450 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5451 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5452 assert(Fn && Addr && "Target device function emission failed."); 5453 } 5454 5455 void CodeGenFunction::EmitOMPTargetParallelDirective( 5456 const OMPTargetParallelDirective &S) { 5457 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5458 emitTargetParallelRegion(CGF, S, Action); 5459 }; 5460 emitCommonOMPTargetDirective(*this, S, CodeGen); 5461 } 5462 5463 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 5464 const OMPTargetParallelForDirective &S, 5465 PrePostActionTy &Action) { 5466 Action.Enter(CGF); 5467 // Emit directive as a combined directive that consists of two implicit 5468 // directives: 'parallel' with 'for' directive. 5469 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5470 Action.Enter(CGF); 5471 CodeGenFunction::OMPCancelStackRAII CancelRegion( 5472 CGF, OMPD_target_parallel_for, S.hasCancel()); 5473 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 5474 emitDispatchForLoopBounds); 5475 }; 5476 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 5477 emitEmptyBoundParameters); 5478 } 5479 5480 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 5481 CodeGenModule &CGM, StringRef ParentName, 5482 const OMPTargetParallelForDirective &S) { 5483 // Emit SPMD target parallel for region as a standalone region. 5484 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5485 emitTargetParallelForRegion(CGF, S, Action); 5486 }; 5487 llvm::Function *Fn; 5488 llvm::Constant *Addr; 5489 // Emit target region as a standalone region. 5490 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5491 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5492 assert(Fn && Addr && "Target device function emission failed."); 5493 } 5494 5495 void CodeGenFunction::EmitOMPTargetParallelForDirective( 5496 const OMPTargetParallelForDirective &S) { 5497 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5498 emitTargetParallelForRegion(CGF, S, Action); 5499 }; 5500 emitCommonOMPTargetDirective(*this, S, CodeGen); 5501 } 5502 5503 static void 5504 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 5505 const OMPTargetParallelForSimdDirective &S, 5506 PrePostActionTy &Action) { 5507 Action.Enter(CGF); 5508 // Emit directive as a combined directive that consists of two implicit 5509 // directives: 'parallel' with 'for' directive. 
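// Assumed example of the combined form handled here:
//   #pragma omp target parallel for simd
//   for (int i = 0; i < n; ++i) ...
// The worksharing-loop emission below covers the 'for' part, while the
// 'simd' part is reflected in the OMPD_simd kind passed to
// emitCommonOMPParallelDirective.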
5510 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5511 Action.Enter(CGF); 5512 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 5513 emitDispatchForLoopBounds); 5514 }; 5515 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen, 5516 emitEmptyBoundParameters); 5517 } 5518 5519 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 5520 CodeGenModule &CGM, StringRef ParentName, 5521 const OMPTargetParallelForSimdDirective &S) { 5522 // Emit SPMD target parallel for region as a standalone region. 5523 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5524 emitTargetParallelForSimdRegion(CGF, S, Action); 5525 }; 5526 llvm::Function *Fn; 5527 llvm::Constant *Addr; 5528 // Emit target region as a standalone region. 5529 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5530 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5531 assert(Fn && Addr && "Target device function emission failed."); 5532 } 5533 5534 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective( 5535 const OMPTargetParallelForSimdDirective &S) { 5536 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5537 emitTargetParallelForSimdRegion(CGF, S, Action); 5538 }; 5539 emitCommonOMPTargetDirective(*this, S, CodeGen); 5540 } 5541 5542 /// Map a helper variable to the address of the corresponding implicit 
parameter of the outlined function. 5543 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, 5544 const ImplicitParamDecl *PVD, 5545 CodeGenFunction::OMPPrivateScope &Privates) { 5546 const auto *VDecl = cast<VarDecl>(Helper->getDecl()); 5547 Privates.addPrivate(VDecl, 5548 [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); }); 5549 } 5550 5551 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) { 5552 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind())); 5553 // Emit outlined function for task construct. 5554 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop); 5555 Address CapturedStruct = Address::invalid(); 5556 { 5557 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 5558 CapturedStruct = GenerateCapturedStmtArgument(*CS); 5559 } 5560 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 5561 const Expr *IfCond = nullptr; 5562 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5563 if (C->getNameModifier() == OMPD_unknown || 5564 C->getNameModifier() == OMPD_taskloop) { 5565 IfCond = C->getCondition(); 5566 break; 5567 } 5568 } 5569 5570 OMPTaskDataTy Data; 5571 // Check if taskloop must be emitted without taskgroup. 5572 Data.Nogroup = S.getSingleClause<OMPNogroupClause>(); 5573 // TODO: Check if we should emit tied or untied task. 5574 Data.Tied = true; 5575 // Set the scheduling for the taskloop. 5576 if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) { 5577 // grainsize clause 5578 Data.Schedule.setInt(/*IntVal=*/false); 5579 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 5580 } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) { 5581 // num_tasks clause 5582 Data.Schedule.setInt(/*IntVal=*/true); 5583 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 5584 } 5585 5586 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 5587 // if (PreCond) { 5588 // for (IV in 0..LastIteration) BODY; 5589 // <Final counter/linear vars updates>; 5590 // } 5591 // 5592 5593 // Emit: if (PreCond) - begin. 
5594 // If the condition constant folds and can be elided, avoid emitting the 5595 // whole loop. 5596 bool CondConstant; 5597 llvm::BasicBlock *ContBlock = nullptr; 5598 OMPLoopScope PreInitScope(CGF, S); 5599 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 5600 if (!CondConstant) 5601 return; 5602 } else { 5603 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 5604 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 5605 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 5606 CGF.getProfileCount(&S)); 5607 CGF.EmitBlock(ThenBlock); 5608 CGF.incrementProfileCounter(&S); 5609 } 5610 5611 (void)CGF.EmitOMPLinearClauseInit(S); 5612 5613 OMPPrivateScope LoopScope(CGF); 5614 // Emit helper vars inits. 5615 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 5616 auto *I = CS->getCapturedDecl()->param_begin(); 5617 auto *LBP = std::next(I, LowerBound); 5618 auto *UBP = std::next(I, UpperBound); 5619 auto *STP = std::next(I, Stride); 5620 auto *LIP = std::next(I, LastIter); 5621 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 5622 LoopScope); 5623 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 5624 LoopScope); 5625 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 5626 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 5627 LoopScope); 5628 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 5629 CGF.EmitOMPLinearClause(S, LoopScope); 5630 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 5631 (void)LoopScope.Privatize(); 5632 // Emit the loop iteration variable. 5633 const Expr *IVExpr = S.getIterationVariable(); 5634 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 5635 CGF.EmitVarDecl(*IVDecl); 5636 CGF.EmitIgnoredExpr(S.getInit()); 5637 5638 // Emit the iterations count variable. 5639 // If it is not a variable, Sema decided to calculate iterations count on 5640 // each iteration (e.g., it is foldable into a constant). 5641 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 5642 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 5643 // Emit calculation of the iterations count. 5644 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 5645 } 5646 5647 { 5648 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 5649 emitCommonSimdLoop( 5650 CGF, S, 5651 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5652 if (isOpenMPSimdDirective(S.getDirectiveKind())) 5653 CGF.EmitOMPSimdInit(S); 5654 }, 5655 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 5656 CGF.EmitOMPInnerLoop( 5657 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 5658 [&S](CodeGenFunction &CGF) { 5659 CGF.EmitOMPLoopBody(S, CodeGenFunction::JumpDest()); 5660 CGF.EmitStopPoint(&S); 5661 }, 5662 [](CodeGenFunction &) {}); 5663 }); 5664 } 5665 // Emit: if (PreCond) - end. 5666 if (ContBlock) { 5667 CGF.EmitBranch(ContBlock); 5668 CGF.EmitBlock(ContBlock, true); 5669 } 5670 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
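// Worked example (assumption, not from a test): with
//   #pragma omp taskloop lastprivate(last)
// the guard below loads the is-last-iteration flag the runtime wrote
// through the LastIter parameter and, only when it is non-zero, copies the
// private 'last' back to the original variable.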
5671 if (HasLastprivateClause) { 5672 CGF.EmitOMPLastprivateClauseFinal( 5673 S, isOpenMPSimdDirective(S.getDirectiveKind()), 5674 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 5675 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 5676 (*LIP)->getType(), S.getBeginLoc()))); 5677 } 5678 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 5679 return CGF.Builder.CreateIsNotNull( 5680 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 5681 (*LIP)->getType(), S.getBeginLoc())); 5682 }); 5683 }; 5684 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 5685 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 5686 const OMPTaskDataTy &Data) { 5687 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 5688 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 5689 OMPLoopScope PreInitScope(CGF, S); 5690 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 5691 OutlinedFn, SharedsTy, 5692 CapturedStruct, IfCond, Data); 5693 }; 5694 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 5695 CodeGen); 5696 }; 5697 if (Data.Nogroup) { 5698 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 5699 } else { 5700 CGM.getOpenMPRuntime().emitTaskgroupRegion( 5701 *this, 5702 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 5703 PrePostActionTy &Action) { 5704 Action.Enter(CGF); 5705 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 5706 Data); 5707 }, 5708 S.getBeginLoc()); 5709 } 5710 } 5711 5712 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 5713 auto LPCRegion = 5714 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5715 EmitOMPTaskLoopBasedDirective(S); 5716 } 5717 5718 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 5719 const OMPTaskLoopSimdDirective &S) { 5720 auto LPCRegion = 5721 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5722 OMPLexicalScope Scope(*this, S); 5723 EmitOMPTaskLoopBasedDirective(S); 5724 } 5725 5726 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 5727 const OMPMasterTaskLoopDirective &S) { 5728 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5729 Action.Enter(CGF); 5730 EmitOMPTaskLoopBasedDirective(S); 5731 }; 5732 auto LPCRegion = 5733 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5734 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 5735 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 5736 } 5737 5738 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 5739 const OMPMasterTaskLoopSimdDirective &S) { 5740 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5741 Action.Enter(CGF); 5742 EmitOMPTaskLoopBasedDirective(S); 5743 }; 5744 auto LPCRegion = 5745 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 5746 OMPLexicalScope Scope(*this, S); 5747 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 5748 } 5749 5750 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 5751 const OMPParallelMasterTaskLoopDirective &S) { 5752 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5753 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 5754 PrePostActionTy &Action) { 5755 Action.Enter(CGF); 5756 CGF.EmitOMPTaskLoopBasedDirective(S); 5757 }; 5758 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 5759 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 5760 S.getBeginLoc()); 
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
                                 emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp parallel master taskloop simd'
// directive.
void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
    const OMPParallelMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}

// Emit an OpenMP executable directive as an inlined region, privatizing
// global loop counters and captured expressions used by the associated
// statement.
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
    const OMPExecutableDirective &D) {
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      OMPPrivateScope LoopGlobals(CGF);
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            LoopGlobals.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getCollapsedNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      LoopGlobals.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
    OMPSimdLexicalScope Scope(*this, D);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        *this,
        isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                    : D.getDirectiveKind(),
        CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, D);
}