//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) ||
                (CGF.CapturedStmtInfo &&
                 InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for OpenMP parallel construct that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                          CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit init, __range and __end variables for C++ range loops.
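    // For example (sketch): given '#pragma omp for collapse(2)' applied to
    //   for (auto &X : Cont)          // range-based outer loop
    //     for (int I = 0; I < N; ++I) { ... }
    // the hidden __range and __end variables of the range-based loop are
    // emitted here so that the collapsed loop bounds can refer to them.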
    const Stmt *Body =
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
          Body, /*TryImperfectlyNestedLoops=*/true);
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
        if (const Stmt *Init = CXXFor->getInit())
          CGF.EmitStmt(Init);
        CGF.EmitStmt(CXXFor->getRangeStmt());
        CGF.EmitStmt(CXXFor->getEndStmt());
        Body = CXXFor->getBody();
      }
    }
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
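  // The resulting list mirrors the CapturedDecl's signature: the parameters
  // preceding the context parameter come first, then one argument per
  // captured field of RD (in place of the context parameter), then the
  // remaining parameters. E.g. for 'parallel' this is roughly
  // (.global_tid., .bound_tid., <captured values>...).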
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. We can pass in the same way the VLA type sizes to
    // the outlined function.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired
                        ? FO.Loc
                        : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
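  // When debug info is needed, the body is emitted into a "<helper>_debug__"
  // function that keeps the original parameter types, and a thin wrapper with
  // the runtime-expected (uintptr-cast) signature is emitted under the helper
  // name to forward into it; see the wrapper emission below.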
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
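  // The emitted copy loop has roughly this shape (sketch):
  //   entry:             br (DestBegin == DestEnd) ? done : body
  //   omp.arraycpy.body: PHIs over the source/destination element pointers;
  //                      CopyGen(dest, src); advance both by one element;
  //                      br (next == DestEnd) ? done : body
  //   omp.arraycpy.done: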
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function (e.g. omp for, omp simd, omp distribute).
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit a copy for firstprivate constant variables in target
      // regions that are captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
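          // E.g. 'firstprivate(C)' where 'C' folds to a constant: the load
          // yields the constant directly, so no private copy is required.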
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy data.
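          // In the master thread the threadprivate variable is the master
          // copy itself, so the two addresses compare equal and the copy
          // region below is skipped.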
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VD, C, OrigVD]() {
                if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit the following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
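      // (This branch covers array sections of runtime length, e.g.
      // 'reduction(+ : A[0:N])'; LHSVD/RHSVD are the implicit variables that
      // the generated reduction combiner expressions refer to.)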
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(
            GetAddrOfLocalVar(PrivateVD), ConvertTypeForMem(RHSVD->getType()),
            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
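    // (Inscan reductions are skipped here; they are finalized elsewhere as
    // part of the scan-based loop emission.)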
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
This is necessary for combined constructs such as 1457 /// 'distribute parallel for'. 1458 typedef llvm::function_ref<void(CodeGenFunction &, 1459 const OMPExecutableDirective &, 1460 llvm::SmallVectorImpl<llvm::Value *> &)> 1461 CodeGenBoundParametersTy; 1462 } // anonymous namespace 1463 1464 static void 1465 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF, 1466 const OMPExecutableDirective &S) { 1467 if (CGF.getLangOpts().OpenMP < 50) 1468 return; 1469 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls; 1470 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 1471 for (const Expr *Ref : C->varlists()) { 1472 if (!Ref->getType()->isScalarType()) 1473 continue; 1474 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1475 if (!DRE) 1476 continue; 1477 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1478 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1479 } 1480 } 1481 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 1482 for (const Expr *Ref : C->varlists()) { 1483 if (!Ref->getType()->isScalarType()) 1484 continue; 1485 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1486 if (!DRE) 1487 continue; 1488 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1489 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1490 } 1491 } 1492 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) { 1493 for (const Expr *Ref : C->varlists()) { 1494 if (!Ref->getType()->isScalarType()) 1495 continue; 1496 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1497 if (!DRE) 1498 continue; 1499 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1500 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1501 } 1502 } 1503 // Privates should not be analyzed since they are not captured at all. 1504 // Task reductions may be skipped - tasks are ignored. 1505 // Firstprivates do not return a value but may be passed by reference - no 1506 // need to check for updated lastprivate conditional.
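// Firstprivates are nonetheless recorded in PrivateDecls below so that the final shared-variable analysis can exclude them.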
1507 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1508 for (const Expr *Ref : C->varlists()) { 1509 if (!Ref->getType()->isScalarType()) 1510 continue; 1511 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1512 if (!DRE) 1513 continue; 1514 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1515 } 1516 } 1517 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1518 CGF, S, PrivateDecls); 1519 } 1520 1521 static void emitCommonOMPParallelDirective( 1522 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1523 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1524 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1525 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1526 llvm::Function *OutlinedFn = 1527 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1528 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1529 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1530 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1531 llvm::Value *NumThreads = 1532 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1533 /*IgnoreResultAssign=*/true); 1534 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1535 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1536 } 1537 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1538 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1539 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1540 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1541 } 1542 const Expr *IfCond = nullptr; 1543 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1544 if (C->getNameModifier() == OMPD_unknown || 1545 C->getNameModifier() == OMPD_parallel) { 1546 IfCond = C->getCondition(); 1547 break; 1548 } 1549 } 1550 1551 OMPParallelScope Scope(CGF, S); 1552 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1553 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk 1554 // lower and upper bounds with the pragma 'for' chunking mechanism. 1555 // The following lambda takes care of appending the lower and upper bound 1556 // parameters when necessary 1557 CodeGenBoundParameters(CGF, S, CapturedVars); 1558 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1559 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1560 CapturedVars, IfCond); 1561 } 1562 1563 static void emitEmptyBoundParameters(CodeGenFunction &, 1564 const OMPExecutableDirective &, 1565 llvm::SmallVectorImpl<llvm::Value *> &) {} 1566 1567 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 1568 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 1569 // Check if we have any if clause associated with the directive. 
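// A sketch of the semantics: with '#pragma omp parallel if(Cond)', a Cond that evaluates to false at run time means the region is executed by the encountering thread alone, i.e. a team of one thread.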
1570 llvm::Value *IfCond = nullptr; 1571 if (const auto *C = S.getSingleClause<OMPIfClause>()) 1572 IfCond = EmitScalarExpr(C->getCondition(), 1573 /*IgnoreResultAssign=*/true); 1574 1575 llvm::Value *NumThreads = nullptr; 1576 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) 1577 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(), 1578 /*IgnoreResultAssign=*/true); 1579 1580 ProcBindKind ProcBind = OMP_PROC_BIND_default; 1581 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) 1582 ProcBind = ProcBindClause->getProcBindKind(); 1583 1584 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 1585 1586 // The cleanup callback that finalizes all variables at the given location, 1587 // i.e., calls destructors etc. 1588 auto FiniCB = [this](InsertPointTy IP) { 1589 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 1590 }; 1591 1592 // Privatization callback that performs appropriate action for 1593 // shared/private/firstprivate/lastprivate/copyin/... variables. 1594 // 1595 // TODO: This defaults to shared right now. 1596 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1597 llvm::Value &Val, llvm::Value *&ReplVal) { 1598 // The next line is appropriate only for variables (Val) with the 1599 // data-sharing attribute "shared". 1600 ReplVal = &Val; 1601 1602 return CodeGenIP; 1603 }; 1604 1605 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1606 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt(); 1607 1608 auto BodyGenCB = [ParallelRegionBodyStmt, 1609 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1610 llvm::BasicBlock &ContinuationBB) { 1611 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP, 1612 ContinuationBB); 1613 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt, 1614 CodeGenIP, ContinuationBB); 1615 }; 1616 1617 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 1618 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 1619 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB, 1620 FiniCB, IfCond, NumThreads, 1621 ProcBind, S.hasCancel())); 1622 return; 1623 } 1624 1625 // Emit parallel region as a standalone region. 1626 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 1627 Action.Enter(CGF); 1628 OMPPrivateScope PrivateScope(CGF); 1629 bool Copyins = CGF.EmitOMPCopyinClause(S); 1630 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 1631 if (Copyins) { 1632 // Emit implicit barrier to synchronize threads and avoid data races on 1633 // propagation of the master thread's values of threadprivate variables to 1634 // the local instances of those variables in all other implicit threads. 1635 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 1636 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 1637 /*ForceSimpleCall=*/true); 1638 } 1639 CGF.EmitOMPPrivateClause(S, PrivateScope); 1640 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 1641 (void)PrivateScope.Privatize(); 1642 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt()); 1643 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 1644 }; 1645 { 1646 auto LPCRegion = 1647 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 1648 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen, 1649 emitEmptyBoundParameters); 1650 emitPostUpdateForReductionClause(*this, S, 1651 [](CodeGenFunction &) { return nullptr; }); 1652 } 1653 // Check for outer lastprivate conditional update.
1654 checkForLastprivateConditionalUpdate(*this, S); 1655 } 1656 1657 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, 1658 int MaxLevel, int Level = 0) { 1659 assert(Level < MaxLevel && "Too deep lookup during loop body codegen."); 1660 const Stmt *SimplifiedS = S->IgnoreContainers(); 1661 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) { 1662 PrettyStackTraceLoc CrashInfo( 1663 CGF.getContext().getSourceManager(), CS->getLBracLoc(), 1664 "LLVM IR generation of compound statement ('{}')"); 1665 1666 // Keep track of the current cleanup stack depth, including debug scopes. 1667 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1668 for (const Stmt *CurStmt : CS->body()) 1669 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1670 return; 1671 } 1672 if (SimplifiedS == NextLoop) { 1673 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1674 S = For->getBody(); 1675 } else { 1676 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1677 "Expected canonical for loop or range-based for loop."); 1678 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1679 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1680 S = CXXFor->getBody(); 1681 } 1682 if (Level + 1 < MaxLevel) { 1683 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1684 S, /*TryImperfectlyNestedLoops=*/true); 1685 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1686 return; 1687 } 1688 } 1689 CGF.EmitStmt(S); 1690 } 1691 1692 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1693 JumpDest LoopExit) { 1694 RunCleanupsScope BodyScope(*this); 1695 // Update counters values on current iteration. 1696 for (const Expr *UE : D.updates()) 1697 EmitIgnoredExpr(UE); 1698 // Update the linear variables. 1699 // In distribute directives only loop counters may be marked as linear, no 1700 // need to generate the code for them. 1701 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1702 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1703 for (const Expr *UE : C->updates()) 1704 EmitIgnoredExpr(UE); 1705 } 1706 } 1707 1708 // On a continue in the body, jump to the end. 1709 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1710 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1711 for (const Expr *E : D.finals_conditions()) { 1712 if (!E) 1713 continue; 1714 // Check that loop counter in non-rectangular nest fits into the iteration 1715 // space. 1716 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1717 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1718 getProfileCount(D.getBody())); 1719 EmitBlock(NextBB); 1720 } 1721 1722 OMPPrivateScope InscanScope(*this); 1723 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true); 1724 bool IsInscanRegion = InscanScope.Privatize(); 1725 if (IsInscanRegion) { 1726 // Need to remember the block before and after scan directive 1727 // to dispatch them correctly depending on the clause used in 1728 // this directive, inclusive or exclusive. For inclusive scan the natural 1729 // order of the blocks is used, for exclusive clause the blocks must be 1730 // executed in reverse order. 1731 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb"); 1732 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb"); 1733 // No need to allocate inscan exit block, in simd mode it is selected in the 1734 // codegen for the scan directive. 
1735 if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd) 1736 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1737 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1738 EmitBranch(OMPScanDispatch); 1739 EmitBlock(OMPBeforeScanBlock); 1740 } 1741 1742 // Emit loop variables for C++ range loops. 1743 const Stmt *Body = 1744 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1745 // Emit loop body. 1746 emitBody(*this, Body, 1747 OMPLoopDirective::tryToFindNextInnerLoop( 1748 Body, /*TryImperfectlyNestedLoops=*/true), 1749 D.getCollapsedNumber()); 1750 1751 // Jump to the dispatcher at the end of the loop body. 1752 if (IsInscanRegion) 1753 EmitBranch(OMPScanExitBlock); 1754 1755 // The end (updates/cleanups). 1756 EmitBlock(Continue.getBlock()); 1757 BreakContinueStack.pop_back(); 1758 } 1759 1760 void CodeGenFunction::EmitOMPInnerLoop( 1761 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 1762 const Expr *IncExpr, 1763 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 1764 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 1765 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 1766 1767 // Start the loop with a block that tests the condition. 1768 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1769 EmitBlock(CondBlock); 1770 const SourceRange R = S.getSourceRange(); 1771 1772 // If attributes are attached, push to the basic block with them. 1773 const auto &OMPED = cast<OMPExecutableDirective>(S); 1774 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 1775 const Stmt *SS = ICS->getCapturedStmt(); 1776 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 1777 if (AS) 1778 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 1779 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 1780 SourceLocToDebugLoc(R.getEnd())); 1781 else 1782 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1783 SourceLocToDebugLoc(R.getEnd())); 1784 1785 // If there are any cleanups between here and the loop-exit scope, 1786 // create a block to stage a loop exit along. 1787 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1788 if (RequiresCleanup) 1789 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1790 1791 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1792 1793 // Emit condition. 1794 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1795 if (ExitBlock != LoopExit.getBlock()) { 1796 EmitBlock(ExitBlock); 1797 EmitBranchThroughCleanup(LoopExit); 1798 } 1799 1800 EmitBlock(LoopBody); 1801 incrementProfileCounter(&S); 1802 1803 // Create a block for the increment. 1804 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1805 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1806 1807 BodyGen(*this); 1808 1809 // Emit "IV = IV + 1" and a back-edge to the condition block. 1810 EmitBlock(Continue.getBlock()); 1811 EmitIgnoredExpr(IncExpr); 1812 PostIncGen(*this); 1813 BreakContinueStack.pop_back(); 1814 EmitBranch(CondBlock); 1815 LoopStack.pop(); 1816 // Emit the fall-through block. 1817 EmitBlock(LoopExit.getBlock()); 1818 } 1819 1820 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1821 if (!HaveInsertPoint()) 1822 return false; 1823 // Emit inits for the linear variables. 
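// E.g., '#pragma omp simd linear(x:2)' reaches this loop with one init expression capturing the start value of 'x'.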
1824 bool HasLinears = false; 1825 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1826 for (const Expr *Init : C->inits()) { 1827 HasLinears = true; 1828 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1829 if (const auto *Ref = 1830 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1831 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1832 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1833 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1834 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1835 VD->getInit()->getType(), VK_LValue, 1836 VD->getInit()->getExprLoc()); 1837 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1838 VD->getType()), 1839 /*capturedByInit=*/false); 1840 EmitAutoVarCleanups(Emission); 1841 } else { 1842 EmitVarDecl(*VD); 1843 } 1844 } 1845 // Emit the linear steps for the linear clauses. 1846 // If a step is not constant, it is pre-calculated before the loop. 1847 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1848 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1849 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1850 // Emit calculation of the linear step. 1851 EmitIgnoredExpr(CS); 1852 } 1853 } 1854 return HasLinears; 1855 } 1856 1857 void CodeGenFunction::EmitOMPLinearClauseFinal( 1858 const OMPLoopDirective &D, 1859 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1860 if (!HaveInsertPoint()) 1861 return; 1862 llvm::BasicBlock *DoneBB = nullptr; 1863 // Emit the final values of the linear variables. 1864 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1865 auto IC = C->varlist_begin(); 1866 for (const Expr *F : C->finals()) { 1867 if (!DoneBB) { 1868 if (llvm::Value *Cond = CondGen(*this)) { 1869 // If the first post-update expression is found, emit conditional 1870 // block if it was requested. 
1871 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1872 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1873 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1874 EmitBlock(ThenBB); 1875 } 1876 } 1877 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1878 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1879 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1880 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1881 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1882 CodeGenFunction::OMPPrivateScope VarScope(*this); 1883 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1884 (void)VarScope.Privatize(); 1885 EmitIgnoredExpr(F); 1886 ++IC; 1887 } 1888 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1889 EmitIgnoredExpr(PostUpdate); 1890 } 1891 if (DoneBB) 1892 EmitBlock(DoneBB, /*IsFinished=*/true); 1893 } 1894 1895 static void emitAlignedClause(CodeGenFunction &CGF, 1896 const OMPExecutableDirective &D) { 1897 if (!CGF.HaveInsertPoint()) 1898 return; 1899 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1900 llvm::APInt ClauseAlignment(64, 0); 1901 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1902 auto *AlignmentCI = 1903 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1904 ClauseAlignment = AlignmentCI->getValue(); 1905 } 1906 for (const Expr *E : Clause->varlists()) { 1907 llvm::APInt Alignment(ClauseAlignment); 1908 if (Alignment == 0) { 1909 // OpenMP [2.8.1, Description] 1910 // If no optional parameter is specified, implementation-defined default 1911 // alignments for SIMD instructions on the target platforms are assumed. 1912 Alignment = 1913 CGF.getContext() 1914 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1915 E->getType()->getPointeeType())) 1916 .getQuantity(); 1917 } 1918 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1919 "alignment is not power of 2"); 1920 if (Alignment != 0) { 1921 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1922 CGF.emitAlignmentAssumption( 1923 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1924 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1925 } 1926 } 1927 } 1928 } 1929 1930 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1931 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1932 if (!HaveInsertPoint()) 1933 return; 1934 auto I = S.private_counters().begin(); 1935 for (const Expr *E : S.counters()) { 1936 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1937 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1938 // Emit var without initialization. 1939 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1940 EmitAutoVarCleanups(VarEmission); 1941 LocalDeclMap.erase(PrivateVD); 1942 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1943 return VarEmission.getAllocatedAddress(); 1944 }); 1945 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1946 VD->hasGlobalStorage()) { 1947 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1948 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1949 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1950 E->getType(), VK_LValue, E->getExprLoc()); 1951 return EmitLValue(&DRE).getAddress(*this); 1952 }); 1953 } else { 1954 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1955 return VarEmission.getAllocatedAddress(); 1956 }); 1957 } 1958 ++I; 1959 } 1960 // Privatize extra loop counters used in loops for ordered(n) clauses. 
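// E.g., '#pragma omp for ordered(2)' on a two-level loop nest (with no collapse clause) carries one loop counter beyond S.counters() that must still be privatized here.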
1961 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 1962 if (!C->getNumForLoops()) 1963 continue; 1964 for (unsigned I = S.getCollapsedNumber(), 1965 E = C->getLoopNumIterations().size(); 1966 I < E; ++I) { 1967 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 1968 const auto *VD = cast<VarDecl>(DRE->getDecl()); 1969 // Override only those variables that can be captured to avoid re-emission 1970 // of the variables declared within the loops. 1971 if (DRE->refersToEnclosingVariableOrCapture()) { 1972 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 1973 return CreateMemTemp(DRE->getType(), VD->getName()); 1974 }); 1975 } 1976 } 1977 } 1978 } 1979 1980 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 1981 const Expr *Cond, llvm::BasicBlock *TrueBlock, 1982 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 1983 if (!CGF.HaveInsertPoint()) 1984 return; 1985 { 1986 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 1987 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 1988 (void)PreCondScope.Privatize(); 1989 // Get initial values of real counters. 1990 for (const Expr *I : S.inits()) { 1991 CGF.EmitIgnoredExpr(I); 1992 } 1993 } 1994 // Create temp loop control variables with their init values to support 1995 // non-rectangular loops. 1996 CodeGenFunction::OMPMapVars PreCondVars; 1997 for (const Expr *E : S.dependent_counters()) { 1998 if (!E) 1999 continue; 2000 assert(!E->getType().getNonReferenceType()->isRecordType() && 2001 "dependent counter must not be an iterator."); 2002 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2003 Address CounterAddr = 2004 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2005 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2006 } 2007 (void)PreCondVars.apply(CGF); 2008 for (const Expr *E : S.dependent_inits()) { 2009 if (!E) 2010 continue; 2011 CGF.EmitIgnoredExpr(E); 2012 } 2013 // Check that the loop is executed at least once. 2014 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2015 PreCondVars.restore(CGF); 2016 } 2017 2018 void CodeGenFunction::EmitOMPLinearClause( 2019 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2020 if (!HaveInsertPoint()) 2021 return; 2022 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2023 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2024 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2025 for (const Expr *C : LoopDirective->counters()) { 2026 SIMDLCVs.insert( 2027 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2028 } 2029 } 2030 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2031 auto CurPrivate = C->privates().begin(); 2032 for (const Expr *E : C->varlists()) { 2033 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2034 const auto *PrivateVD = 2035 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2036 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2037 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 2038 // Emit private VarDecl with copy init. 2039 EmitVarDecl(*PrivateVD); 2040 return GetAddrOfLocalVar(PrivateVD); 2041 }); 2042 assert(IsRegistered && "linear var already registered as private"); 2043 // Silence the warning about unused variable.
2044 (void)IsRegistered; 2045 } else { 2046 EmitVarDecl(*PrivateVD); 2047 } 2048 ++CurPrivate; 2049 } 2050 } 2051 } 2052 2053 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2054 const OMPExecutableDirective &D, 2055 bool IsMonotonic) { 2056 if (!CGF.HaveInsertPoint()) 2057 return; 2058 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2059 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2060 /*ignoreResult=*/true); 2061 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2062 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2063 // In the presence of finite 'safelen', it may be unsafe to mark all 2064 // the memory instructions parallel, because loop-carried 2065 // dependences of 'safelen' iterations are possible. 2066 if (!IsMonotonic) 2067 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2068 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2069 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2070 /*ignoreResult=*/true); 2071 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2072 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2073 // In the presence of finite 'safelen', it may be unsafe to mark all 2074 // the memory instructions parallel, because loop-carried 2075 // dependences of 'safelen' iterations are possible. 2076 CGF.LoopStack.setParallel(/*Enable=*/false); 2077 } 2078 } 2079 2080 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 2081 bool IsMonotonic) { 2082 // Walk clauses and process safelen/simdlen. 2083 LoopStack.setParallel(!IsMonotonic); 2084 LoopStack.setVectorizeEnable(); 2085 emitSimdlenSafelenClause(*this, D, IsMonotonic); 2086 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2087 if (C->getKind() == OMPC_ORDER_concurrent) 2088 LoopStack.setParallel(/*Enable=*/true); 2089 if ((D.getDirectiveKind() == OMPD_simd || 2090 (getLangOpts().OpenMPSimd && 2091 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2092 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2093 [](const OMPReductionClause *C) { 2094 return C->getModifier() == OMPC_REDUCTION_inscan; 2095 })) 2096 // Disable parallel access in case of prefix sum. 2097 LoopStack.setParallel(/*Enable=*/false); 2098 } 2099 2100 void CodeGenFunction::EmitOMPSimdFinal( 2101 const OMPLoopDirective &D, 2102 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2103 if (!HaveInsertPoint()) 2104 return; 2105 llvm::BasicBlock *DoneBB = nullptr; 2106 auto IC = D.counters().begin(); 2107 auto IPC = D.private_counters().begin(); 2108 for (const Expr *F : D.finals()) { 2109 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 2110 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 2111 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 2112 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 2113 OrigVD->hasGlobalStorage() || CED) { 2114 if (!DoneBB) { 2115 if (llvm::Value *Cond = CondGen(*this)) { 2116 // If the first post-update expression is found, emit conditional 2117 // block if it was requested.
2118 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 2119 DoneBB = createBasicBlock(".omp.final.done"); 2120 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2121 EmitBlock(ThenBB); 2122 } 2123 } 2124 Address OrigAddr = Address::invalid(); 2125 if (CED) { 2126 OrigAddr = 2127 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 2128 } else { 2129 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 2130 /*RefersToEnclosingVariableOrCapture=*/false, 2131 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 2132 OrigAddr = EmitLValue(&DRE).getAddress(*this); 2133 } 2134 OMPPrivateScope VarScope(*this); 2135 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2136 (void)VarScope.Privatize(); 2137 EmitIgnoredExpr(F); 2138 } 2139 ++IC; 2140 ++IPC; 2141 } 2142 if (DoneBB) 2143 EmitBlock(DoneBB, /*IsFinished=*/true); 2144 } 2145 2146 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 2147 const OMPLoopDirective &S, 2148 CodeGenFunction::JumpDest LoopExit) { 2149 CGF.EmitOMPLoopBody(S, LoopExit); 2150 CGF.EmitStopPoint(&S); 2151 } 2152 2153 /// Emit a helper variable and return corresponding lvalue. 2154 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2155 const DeclRefExpr *Helper) { 2156 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2157 CGF.EmitVarDecl(*VDecl); 2158 return CGF.EmitLValue(Helper); 2159 } 2160 2161 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2162 const RegionCodeGenTy &SimdInitGen, 2163 const RegionCodeGenTy &BodyCodeGen) { 2164 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2165 PrePostActionTy &) { 2166 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2167 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2168 SimdInitGen(CGF); 2169 2170 BodyCodeGen(CGF); 2171 }; 2172 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2173 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2174 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2175 2176 BodyCodeGen(CGF); 2177 }; 2178 const Expr *IfCond = nullptr; 2179 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2180 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2181 if (CGF.getLangOpts().OpenMP >= 50 && 2182 (C->getNameModifier() == OMPD_unknown || 2183 C->getNameModifier() == OMPD_simd)) { 2184 IfCond = C->getCondition(); 2185 break; 2186 } 2187 } 2188 } 2189 if (IfCond) { 2190 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2191 } else { 2192 RegionCodeGenTy ThenRCG(ThenGen); 2193 ThenRCG(CGF); 2194 } 2195 } 2196 2197 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2198 PrePostActionTy &Action) { 2199 Action.Enter(CGF); 2200 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2201 "Expected simd directive"); 2202 OMPLoopScope PreInitScope(CGF, S); 2203 // if (PreCond) { 2204 // for (IV in 0..LastIteration) BODY; 2205 // <Final counter/linear vars updates>; 2206 // } 2207 // 2208 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2209 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2210 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2211 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2212 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2213 } 2214 2215 // Emit: if (PreCond) - begin. 2216 // If the condition constant folds and can be elided, avoid emitting the 2217 // whole loop. 
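// E.g., for 'for (int i = 0; i < 0; ++i)' the precondition constant-folds to false and the whole simd region is elided.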
2218 bool CondConstant; 2219 llvm::BasicBlock *ContBlock = nullptr; 2220 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2221 if (!CondConstant) 2222 return; 2223 } else { 2224 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2225 ContBlock = CGF.createBasicBlock("simd.if.end"); 2226 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2227 CGF.getProfileCount(&S)); 2228 CGF.EmitBlock(ThenBlock); 2229 CGF.incrementProfileCounter(&S); 2230 } 2231 2232 // Emit the loop iteration variable. 2233 const Expr *IVExpr = S.getIterationVariable(); 2234 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2235 CGF.EmitVarDecl(*IVDecl); 2236 CGF.EmitIgnoredExpr(S.getInit()); 2237 2238 // Emit the iterations count variable. 2239 // If it is not a variable, Sema decided to calculate iterations count on 2240 // each iteration (e.g., it is foldable into a constant). 2241 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2242 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2243 // Emit calculation of the iterations count. 2244 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2245 } 2246 2247 emitAlignedClause(CGF, S); 2248 (void)CGF.EmitOMPLinearClauseInit(S); 2249 { 2250 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2251 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2252 CGF.EmitOMPLinearClause(S, LoopScope); 2253 CGF.EmitOMPPrivateClause(S, LoopScope); 2254 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2255 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2256 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2257 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2258 (void)LoopScope.Privatize(); 2259 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2260 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2261 2262 emitCommonSimdLoop( 2263 CGF, S, 2264 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2265 CGF.EmitOMPSimdInit(S); 2266 }, 2267 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2268 CGF.EmitOMPInnerLoop( 2269 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2270 [&S](CodeGenFunction &CGF) { 2271 emitOMPLoopBodyWithStopPoint(CGF, S, 2272 CodeGenFunction::JumpDest()); 2273 }, 2274 [](CodeGenFunction &) {}); 2275 }); 2276 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2277 // Emit final copy of the lastprivate variables at the end of loops. 2278 if (HasLastprivateClause) 2279 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2280 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2281 emitPostUpdateForReductionClause(CGF, S, 2282 [](CodeGenFunction &) { return nullptr; }); 2283 } 2284 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2285 // Emit: if (PreCond) - end. 2286 if (ContBlock) { 2287 CGF.EmitBranch(ContBlock); 2288 CGF.EmitBlock(ContBlock, true); 2289 } 2290 } 2291 2292 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2293 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2294 OMPFirstScanLoop = true; 2295 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2296 emitOMPSimdRegion(CGF, S, Action); 2297 }; 2298 { 2299 auto LPCRegion = 2300 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2301 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2302 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2303 } 2304 // Check for outer lastprivate conditional update. 
2305 checkForLastprivateConditionalUpdate(*this, S); 2306 } 2307 2308 void CodeGenFunction::EmitOMPOuterLoop( 2309 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2310 CodeGenFunction::OMPPrivateScope &LoopScope, 2311 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2312 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2313 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2314 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2315 2316 const Expr *IVExpr = S.getIterationVariable(); 2317 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2318 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2319 2320 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2321 2322 // Start the loop with a block that tests the condition. 2323 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2324 EmitBlock(CondBlock); 2325 const SourceRange R = S.getSourceRange(); 2326 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2327 SourceLocToDebugLoc(R.getEnd())); 2328 2329 llvm::Value *BoolCondVal = nullptr; 2330 if (!DynamicOrOrdered) { 2331 // UB = min(UB, GlobalUB) or 2332 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2333 // 'distribute parallel for') 2334 EmitIgnoredExpr(LoopArgs.EUB); 2335 // IV = LB 2336 EmitIgnoredExpr(LoopArgs.Init); 2337 // IV < UB 2338 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2339 } else { 2340 BoolCondVal = 2341 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2342 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2343 } 2344 2345 // If there are any cleanups between here and the loop-exit scope, 2346 // create a block to stage a loop exit along. 2347 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2348 if (LoopScope.requiresCleanups()) 2349 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2350 2351 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2352 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2353 if (ExitBlock != LoopExit.getBlock()) { 2354 EmitBlock(ExitBlock); 2355 EmitBranchThroughCleanup(LoopExit); 2356 } 2357 EmitBlock(LoopBody); 2358 2359 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2360 // LB for loop condition and emitted it above). 2361 if (DynamicOrOrdered) 2362 EmitIgnoredExpr(LoopArgs.Init); 2363 2364 // Create a block for the increment. 2365 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2366 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2367 2368 emitCommonSimdLoop( 2369 *this, S, 2370 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2371 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2372 // with dynamic/guided scheduling and without ordered clause. 2373 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2374 CGF.LoopStack.setParallel(!IsMonotonic); 2375 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2376 if (C->getKind() == OMPC_ORDER_concurrent) 2377 CGF.LoopStack.setParallel(/*Enable=*/true); 2378 } else { 2379 CGF.EmitOMPSimdInit(S, IsMonotonic); 2380 } 2381 }, 2382 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2383 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2384 SourceLocation Loc = S.getBeginLoc(); 2385 // when 'distribute' is not combined with a 'for': 2386 // while (idx <= UB) { BODY; ++idx; } 2387 // when 'distribute' is combined with a 'for' 2388 // (e.g. 
'distribute parallel for') 2389 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2390 CGF.EmitOMPInnerLoop( 2391 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2392 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2393 CodeGenLoop(CGF, S, LoopExit); 2394 }, 2395 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2396 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2397 }); 2398 }); 2399 2400 EmitBlock(Continue.getBlock()); 2401 BreakContinueStack.pop_back(); 2402 if (!DynamicOrOrdered) { 2403 // Emit "LB = LB + Stride", "UB = UB + Stride". 2404 EmitIgnoredExpr(LoopArgs.NextLB); 2405 EmitIgnoredExpr(LoopArgs.NextUB); 2406 } 2407 2408 EmitBranch(CondBlock); 2409 LoopStack.pop(); 2410 // Emit the fall-through block. 2411 EmitBlock(LoopExit.getBlock()); 2412 2413 // Tell the runtime we are done. 2414 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2415 if (!DynamicOrOrdered) 2416 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2417 S.getDirectiveKind()); 2418 }; 2419 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2420 } 2421 2422 void CodeGenFunction::EmitOMPForOuterLoop( 2423 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2424 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2425 const OMPLoopArguments &LoopArgs, 2426 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2427 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2428 2429 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2430 const bool DynamicOrOrdered = 2431 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2432 2433 assert((Ordered || 2434 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2435 LoopArgs.Chunk != nullptr)) && 2436 "static non-chunked schedule does not need outer loop"); 2437 2438 // Emit outer loop. 2439 // 2440 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2441 // When schedule(dynamic,chunk_size) is specified, the iterations are 2442 // distributed to threads in the team in chunks as the threads request them. 2443 // Each thread executes a chunk of iterations, then requests another chunk, 2444 // until no chunks remain to be distributed. Each chunk contains chunk_size 2445 // iterations, except for the last chunk to be distributed, which may have 2446 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2447 // 2448 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2449 // to threads in the team in chunks as the executing threads request them. 2450 // Each thread executes a chunk of iterations, then requests another chunk, 2451 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2452 // each chunk is proportional to the number of unassigned iterations divided 2453 // by the number of threads in the team, decreasing to 1. For a chunk_size 2454 // with value k (greater than 1), the size of each chunk is determined in the 2455 // same way, with the restriction that the chunks do not contain fewer than k 2456 // iterations (except for the last chunk to be assigned, which may have fewer 2457 // than k iterations). 2458 // 2459 // When schedule(auto) is specified, the decision regarding scheduling is 2460 // delegated to the compiler and/or runtime system. The programmer gives the 2461 // implementation the freedom to choose any possible mapping of iterations to 2462 // threads in the team. 
2463 // 2464 // When schedule(runtime) is specified, the decision regarding scheduling is 2465 // deferred until run time, and the schedule and chunk size are taken from the 2466 // run-sched-var ICV. If the ICV is set to auto, the schedule is 2467 // implementation defined. 2468 // 2469 // while(__kmpc_dispatch_next(&LB, &UB)) { 2470 // idx = LB; 2471 // while (idx <= UB) { BODY; ++idx; 2472 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 2473 // } // inner loop 2474 // } 2475 // 2476 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2477 // When schedule(static, chunk_size) is specified, iterations are divided into 2478 // chunks of size chunk_size, and the chunks are assigned to the threads in 2479 // the team in a round-robin fashion in the order of the thread number. 2480 // 2481 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 2482 // while (idx <= UB) { BODY; ++idx; } // inner loop 2483 // LB = LB + ST; 2484 // UB = UB + ST; 2485 // } 2486 // 2487 2488 const Expr *IVExpr = S.getIterationVariable(); 2489 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2490 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2491 2492 if (DynamicOrOrdered) { 2493 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds = 2494 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 2495 llvm::Value *LBVal = DispatchBounds.first; 2496 llvm::Value *UBVal = DispatchBounds.second; 2497 CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal, 2498 LoopArgs.Chunk}; 2499 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize, 2500 IVSigned, Ordered, DispatchRTInputValues); 2501 } else { 2502 CGOpenMPRuntime::StaticRTInput StaticInit( 2503 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 2504 LoopArgs.ST, LoopArgs.Chunk); 2505 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(), 2506 ScheduleKind, StaticInit); 2507 } 2508 2509 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 2510 const unsigned IVSize, 2511 const bool IVSigned) { 2512 if (Ordered) { 2513 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 2514 IVSigned); 2515 } 2516 }; 2517 2518 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 2519 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 2520 OuterLoopArgs.IncExpr = S.getInc(); 2521 OuterLoopArgs.Init = S.getInit(); 2522 OuterLoopArgs.Cond = S.getCond(); 2523 OuterLoopArgs.NextLB = S.getNextLowerBound(); 2524 OuterLoopArgs.NextUB = S.getNextUpperBound(); 2525 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 2526 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 2527 } 2528 2529 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, 2530 const unsigned IVSize, const bool IVSigned) {} 2531 2532 void CodeGenFunction::EmitOMPDistributeOuterLoop( 2533 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 2534 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 2535 const CodeGenLoopTy &CodeGenLoopContent) { 2536 2537 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2538 2539 // Emit outer loop.
2540 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be 2541 // dynamic. 2542 // 2543 2544 const Expr *IVExpr = S.getIterationVariable(); 2545 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2546 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2547 2548 CGOpenMPRuntime::StaticRTInput StaticInit( 2549 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB, 2550 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk); 2551 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit); 2552 2553 // For combined 'distribute' and 'for' the increment expression of distribute 2554 // is stored in DistInc. For 'distribute' alone, it is in Inc. 2555 Expr *IncExpr; 2556 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) 2557 IncExpr = S.getDistInc(); 2558 else 2559 IncExpr = S.getInc(); 2560 2561 // This routine is shared by 'omp distribute parallel for' and 2562 // 'omp distribute': select the right EUB expression depending on the 2563 // directive. 2564 OMPLoopArguments OuterLoopArgs; 2565 OuterLoopArgs.LB = LoopArgs.LB; 2566 OuterLoopArgs.UB = LoopArgs.UB; 2567 OuterLoopArgs.ST = LoopArgs.ST; 2568 OuterLoopArgs.IL = LoopArgs.IL; 2569 OuterLoopArgs.Chunk = LoopArgs.Chunk; 2570 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2571 ? S.getCombinedEnsureUpperBound() 2572 : S.getEnsureUpperBound(); 2573 OuterLoopArgs.IncExpr = IncExpr; 2574 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2575 ? S.getCombinedInit() 2576 : S.getInit(); 2577 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2578 ? S.getCombinedCond() 2579 : S.getCond(); 2580 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2581 ? S.getCombinedNextLowerBound() 2582 : S.getNextLowerBound(); 2583 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2584 ? S.getCombinedNextUpperBound() 2585 : S.getNextUpperBound(); 2586 2587 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S, 2588 LoopScope, OuterLoopArgs, CodeGenLoopContent, 2589 emitEmptyOrdered); 2590 } 2591 2592 static std::pair<LValue, LValue> 2593 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, 2594 const OMPExecutableDirective &S) { 2595 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2596 LValue LB = 2597 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2598 LValue UB = 2599 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2600 2601 // When composing 'distribute' with 'for' (e.g. as in 'distribute 2602 // parallel for') we need to use the 'distribute' 2603 // chunk lower and upper bounds rather than the whole loop iteration 2604 // space. These are parameters to the outlined function for 'parallel' 2605 // and we copy the bounds of the previous schedule into the 2606 // current ones.
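// A rough sketch of the effect for '#pragma omp distribute parallel for': // LB = PrevLB; UB = PrevUB; // the inner 'for' schedule then partitions only this distribute chunk.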
2607 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); 2608 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); 2609 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar( 2610 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc()); 2611 PrevLBVal = CGF.EmitScalarConversion( 2612 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), 2613 LS.getIterationVariable()->getType(), 2614 LS.getPrevLowerBoundVariable()->getExprLoc()); 2615 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar( 2616 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc()); 2617 PrevUBVal = CGF.EmitScalarConversion( 2618 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), 2619 LS.getIterationVariable()->getType(), 2620 LS.getPrevUpperBoundVariable()->getExprLoc()); 2621 2622 CGF.EmitStoreOfScalar(PrevLBVal, LB); 2623 CGF.EmitStoreOfScalar(PrevUBVal, UB); 2624 2625 return {LB, UB}; 2626 } 2627 2628 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), then 2629 /// we need to use the LB and UB expressions generated by the worksharing 2630 /// code generation support, whereas in non-combined situations we would 2631 /// just emit 0 and the LastIteration expression. 2632 /// This function is necessary due to the difference of the LB and UB 2633 /// types for the RT emission routines for 'for_static_init' and 2634 /// 'for_dispatch_init'. 2635 static std::pair<llvm::Value *, llvm::Value *> 2636 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, 2637 const OMPExecutableDirective &S, 2638 Address LB, Address UB) { 2639 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2640 const Expr *IVExpr = LS.getIterationVariable(); 2641 // When implementing a dynamic schedule for a 'for' combined with a 2642 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop 2643 // is not normalized as each team only executes its own assigned 2644 // distribute chunk. 2645 QualType IteratorTy = IVExpr->getType(); 2646 llvm::Value *LBVal = 2647 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2648 llvm::Value *UBVal = 2649 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2650 return {LBVal, UBVal}; 2651 } 2652 2653 static void emitDistributeParallelForDistributeInnerBoundParams( 2654 CodeGenFunction &CGF, const OMPExecutableDirective &S, 2655 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) { 2656 const auto &Dir = cast<OMPLoopDirective>(S); 2657 LValue LB = 2658 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable())); 2659 llvm::Value *LBCast = 2660 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)), 2661 CGF.SizeTy, /*isSigned=*/false); 2662 CapturedVars.push_back(LBCast); 2663 LValue UB = 2664 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable())); 2665 2666 llvm::Value *UBCast = 2667 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)), 2668 CGF.SizeTy, /*isSigned=*/false); 2669 CapturedVars.push_back(UBCast); 2670 } 2671 2672 static void 2673 emitInnerParallelForWhenCombined(CodeGenFunction &CGF, 2674 const OMPLoopDirective &S, 2675 CodeGenFunction::JumpDest LoopExit) { 2676 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, 2677 PrePostActionTy &Action) { 2678 Action.Enter(CGF); 2679 bool HasCancel = false; 2680 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2681 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S)) 2682 HasCancel = D->hasCancel(); 2683 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S)) 2684 HasCancel =
D->hasCancel(); 2685 else if (const auto *D = 2686 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2687 HasCancel = D->hasCancel(); 2688 } 2689 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2690 HasCancel); 2691 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2692 emitDistributeParallelForInnerBounds, 2693 emitDistributeParallelForDispatchBounds); 2694 }; 2695 2696 emitCommonOMPParallelDirective( 2697 CGF, S, 2698 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2699 CGInlinedWorksharingLoop, 2700 emitDistributeParallelForDistributeInnerBoundParams); 2701 } 2702 2703 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2704 const OMPDistributeParallelForDirective &S) { 2705 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2706 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2707 S.getDistInc()); 2708 }; 2709 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2710 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2711 } 2712 2713 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2714 const OMPDistributeParallelForSimdDirective &S) { 2715 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2716 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2717 S.getDistInc()); 2718 }; 2719 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2720 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2721 } 2722 2723 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2724 const OMPDistributeSimdDirective &S) { 2725 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2726 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2727 }; 2728 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2729 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2730 } 2731 2732 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2733 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2734 // Emit SPMD target simd region as a standalone region. 2735 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2736 emitOMPSimdRegion(CGF, S, Action); 2737 }; 2738 llvm::Function *Fn; 2739 llvm::Constant *Addr; 2740 // Emit target region as a standalone region. 2741 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 2742 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 2743 assert(Fn && Addr && "Target device function emission failed."); 2744 } 2745 2746 void CodeGenFunction::EmitOMPTargetSimdDirective( 2747 const OMPTargetSimdDirective &S) { 2748 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2749 emitOMPSimdRegion(CGF, S, Action); 2750 }; 2751 emitCommonOMPTargetDirective(*this, S, CodeGen); 2752 } 2753 2754 namespace { 2755 struct ScheduleKindModifiersTy { 2756 OpenMPScheduleClauseKind Kind; 2757 OpenMPScheduleClauseModifier M1; 2758 OpenMPScheduleClauseModifier M2; 2759 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2760 OpenMPScheduleClauseModifier M1, 2761 OpenMPScheduleClauseModifier M2) 2762 : Kind(Kind), M1(M1), M2(M2) {} 2763 }; 2764 } // namespace 2765 2766 bool CodeGenFunction::EmitOMPWorksharingLoop( 2767 const OMPLoopDirective &S, Expr *EUB, 2768 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2769 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2770 // Emit the loop iteration variable.
2771 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2772 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2773 EmitVarDecl(*IVDecl); 2774 2775 // Emit the iterations count variable. 2776 // If it is not a variable, Sema decided to calculate iterations count on each 2777 // iteration (e.g., it is foldable into a constant). 2778 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2779 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2780 // Emit calculation of the iterations count. 2781 EmitIgnoredExpr(S.getCalcLastIteration()); 2782 } 2783 2784 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2785 2786 bool HasLastprivateClause; 2787 // Check pre-condition. 2788 { 2789 OMPLoopScope PreInitScope(*this, S); 2790 // Skip the entire loop if we don't meet the precondition. 2791 // If the condition constant folds and can be elided, avoid emitting the 2792 // whole loop. 2793 bool CondConstant; 2794 llvm::BasicBlock *ContBlock = nullptr; 2795 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2796 if (!CondConstant) 2797 return false; 2798 } else { 2799 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2800 ContBlock = createBasicBlock("omp.precond.end"); 2801 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2802 getProfileCount(&S)); 2803 EmitBlock(ThenBlock); 2804 incrementProfileCounter(&S); 2805 } 2806 2807 RunCleanupsScope DoacrossCleanupScope(*this); 2808 bool Ordered = false; 2809 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2810 if (OrderedClause->getNumForLoops()) 2811 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2812 else 2813 Ordered = true; 2814 } 2815 2816 llvm::DenseSet<const Expr *> EmittedFinals; 2817 emitAlignedClause(*this, S); 2818 bool HasLinears = EmitOMPLinearClauseInit(S); 2819 // Emit helper vars inits. 2820 2821 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2822 LValue LB = Bounds.first; 2823 LValue UB = Bounds.second; 2824 LValue ST = 2825 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2826 LValue IL = 2827 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2828 2829 // Emit 'then' code. 2830 { 2831 OMPPrivateScope LoopScope(*this); 2832 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2833 // Emit implicit barrier to synchronize threads and avoid data races on 2834 // initialization of firstprivate variables and post-update of 2835 // lastprivate variables. 2836 CGM.getOpenMPRuntime().emitBarrierCall( 2837 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2838 /*ForceSimpleCall=*/true); 2839 } 2840 EmitOMPPrivateClause(S, LoopScope); 2841 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2842 *this, S, EmitLValue(S.getIterationVariable())); 2843 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2844 EmitOMPReductionClauseInit(S, LoopScope); 2845 EmitOMPPrivateLoopCounters(S, LoopScope); 2846 EmitOMPLinearClause(S, LoopScope); 2847 (void)LoopScope.Privatize(); 2848 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2849 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2850 2851 // Detect the loop schedule kind and chunk. 
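// E.g., 'schedule(static, 4)' yields OMPC_SCHEDULE_static with ChunkExpr == 4; with no schedule clause the default schedule and chunk are taken from the runtime below.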
      // Detect the loop schedule kind and chunk.
      const Expr *ChunkExpr = nullptr;
      OpenMPScheduleTy ScheduleKind;
      if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
        ScheduleKind.Schedule = C->getScheduleKind();
        ScheduleKind.M1 = C->getFirstScheduleModifier();
        ScheduleKind.M2 = C->getSecondScheduleModifier();
        ChunkExpr = C->getChunkSize();
      } else {
        // Default behaviour for schedule clause.
        CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
            *this, S, ScheduleKind.Schedule, ChunkExpr);
      }
      bool HasChunkSizeOne = false;
      llvm::Value *Chunk = nullptr;
      if (ChunkExpr) {
        Chunk = EmitScalarExpr(ChunkExpr);
        Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
                                     S.getIterationVariable()->getType(),
                                     S.getBeginLoc());
        Expr::EvalResult Result;
        if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
          llvm::APSInt EvaluatedChunk = Result.Val.getInt();
          HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
        }
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
      // If the static schedule kind is specified or if the ordered clause is
      // specified, and if no monotonic modifier is specified, the effect will
      // be as if the monotonic modifier was specified.
      bool StaticChunkedOne =
          RT.isStaticChunked(ScheduleKind.Schedule,
                             /* Chunked */ Chunk != nullptr) &&
          HasChunkSizeOne &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
                                 /* Chunked */ Chunk != nullptr) ||
           StaticChunkedOne) &&
          !Ordered) {
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind())) {
                CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
              } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
                if (C->getKind() == OMPC_ORDER_concurrent)
                  CGF.LoopStack.setParallel(/*Enable=*/true);
              }
            },
            [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
             &S, ScheduleKind, LoopExit,
             &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
              // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
              // When no chunk_size is specified, the iteration space is
              // divided into chunks that are approximately equal in size,
              // and at most one chunk is distributed to each thread. Note
              // that the size of the chunks is unspecified in this case.
              CGOpenMPRuntime::StaticRTInput StaticInit(
                  IVSize, IVSigned, Ordered, IL.getAddress(CGF),
                  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
                  StaticChunkedOne ? Chunk : nullptr);
              CGF.CGM.getOpenMPRuntime().emitForStaticInit(
                  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
                  StaticInit);
              // UB = min(UB, GlobalUB);
              if (!StaticChunkedOne)
                CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
              // IV = LB;
              CGF.EmitIgnoredExpr(S.getInit());
              // For unchunked static schedule generate:
              //
              // while (idx <= UB) {
              //   BODY;
              //   ++idx;
              // }
              //
              // For static schedule with chunk one:
              //
              // while (IV <= PrevUB) {
              //   BODY;
              //   IV += ST;
              // }
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(),
                  StaticChunkedOne ? S.getCombinedParForInDistCond()
                                   : S.getCond(),
                  StaticChunkedOne ? S.getDistInc() : S.getInc(),
                  [&S, LoopExit](CodeGenFunction &CGF) {
                    emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
                  },
                  [](CodeGenFunction &) {});
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        auto &&CodeGen = [&S](CodeGenFunction &CGF) {
          CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                         S.getDirectiveKind());
        };
        OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
      } else {
        const bool IsMonotonic =
            Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
            ScheduleKind.Schedule == OMPC_SCHEDULE_unknown ||
            ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
            ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments(
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk, EUB);
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                            LoopArguments, CGDispatchBounds);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      EmitOMPReductionClauseFinal(
          S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
                 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
                 : /*Parallel only*/ OMPD_parallel);
      // Emit post-update of the reduction variables if IsLastIter != 0.
      emitPostUpdateForReductionClause(
          *this, S, [IL, &S](CodeGenFunction &CGF) {
            return CGF.Builder.CreateIsNotNull(
                CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
          });
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, isOpenMPSimdDirective(S.getDirectiveKind()),
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
    }
    EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });
    DoacrossCleanupScope.ForceCleanup();
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, /*IsFinished=*/true);
    }
  }
  return HasLastprivateClause;
}
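// A companion sketch (abbreviated, for orientation only) of the dynamic
// dispatch path taken through EmitOMPForOuterLoop above, e.g. for
// '#pragma omp for schedule(dynamic)', again in terms of libomp entry points:
//
//   __kmpc_dispatch_init_4(&loc, tid, schedule, LB, UB, ST, chunk);
//   while (__kmpc_dispatch_next_4(&loc, tid, &IL, &LB, &UB, &ST))
//     for (IV = LB; IV <= UB; ++IV)
//       BODY;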
/// The following two functions generate expressions for the loop lower
/// and upper bounds in case of static and dynamic (dispatch) schedule
/// of the associated 'for' or 'distribute' loop.
static std::pair<LValue, LValue>
emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  const auto &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
  return {LB, UB};
}

/// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
/// consider the lower and upper bound expressions generated by the
/// worksharing loop support, but we use 0 and the iteration space size as
/// constants.
static std::pair<llvm::Value *, llvm::Value *>
emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
                          Address LB, Address UB) {
  const auto &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
  return {LBVal, UBVal};
}

/// Emits the code for the directive with inscan reductions.
/// The code is the following:
/// \code
/// size num_iters = <num_iters>;
/// <type> buffer[num_iters];
/// #pragma omp ...
/// for (i: 0..<num_iters>) {
///   <input phase>;
///   buffer[i] = red;
/// }
/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
/// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
///   buffer[cnt] op= buffer[cnt-pow(2,k)];
/// #pragma omp ...
/// for (0..<num_iters>) {
///   red = InclusiveScan ? buffer[i] : buffer[i-1];
///   <scan phase>;
/// }
/// \endcode
static void emitScanBasedDirective(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
    llvm::function_ref<void(CodeGenFunction &)> FirstGen,
    llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
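  // A small worked example of the prefix reduction emitted below, assuming
  // num_iters == 8 (so ceil(log2(8)) == 3 outer steps):
  //   k = 0, pow2k = 1: buffer[7] += buffer[6]; ...; buffer[1] += buffer[0];
  //   k = 1, pow2k = 2: buffer[7] += buffer[5]; ...; buffer[2] += buffer[0];
  //   k = 2, pow2k = 4: buffer[7] += buffer[3]; ...; buffer[4] += buffer[0];
  // After the last step buffer[i] holds the reduction of elements 0..i, i.e.
  // the inclusive scan.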
  {
    // Emit buffers for each reduction variable.
    // ReductionCodeGen is required to emit correctly the code for array
    // reductions.
    ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
    unsigned Count = 0;
    auto *ITA = CopyArrayTemps.begin();
    for (const Expr *IRef : Privates) {
      const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      // Emit variably modified arrays, used for arrays/array sections
      // reductions.
      if (PrivateVD->getType()->isVariablyModifiedType()) {
        RedCG.emitSharedOrigLValue(CGF, Count);
        RedCG.emitAggregateType(CGF, Count);
      }
      CodeGenFunction::OpaqueValueMapping DimMapping(
          CGF,
          cast<OpaqueValueExpr>(
              cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
                  ->getSizeExpr()),
          RValue::get(OMPScanNumIterations));
      // Emit temp buffer.
      CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
      ++ITA;
      ++Count;
    }
  }
  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
  {
    // Emit loop with input phase:
    // #pragma omp ...
    // for (i: 0..<num_iters>) {
    //   <input phase>;
    //   buffer[i] = red;
    // }
    CGF.OMPFirstScanLoop = true;
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    FirstGen(CGF);
  }
  // Emit prefix reduction:
  // for (int k = 0; k <= ceil(log2(n)); ++k)
  llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
  llvm::Value *Arg =
      CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
  llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
  F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
  LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
  LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
  llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
      OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
  CGF.EmitBlock(LoopBB);
  auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
  // size pow2k = 1;
  auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
  Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
  Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
  // for (size i = n - 1; i >= 2 ^ k; --i)
  //   tmp[i] op= tmp[i-pow2k];
  llvm::BasicBlock *InnerLoopBB =
      CGF.createBasicBlock("omp.inner.log.scan.body");
  llvm::BasicBlock *InnerExitBB =
      CGF.createBasicBlock("omp.inner.log.scan.exit");
  llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
  CGF.EmitBlock(InnerLoopBB);
  auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
  IVal->addIncoming(NMin1, LoopBB);
  {
    CodeGenFunction::OMPPrivateScope PrivScope(CGF);
    auto *ILHS = LHSs.begin();
    auto *IRHS = RHSs.begin();
    for (const Expr *CopyArrayElem : CopyArrayElems) {
      const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      Address LHSAddr = Address::invalid();
      {
        CodeGenFunction::OpaqueValueMapping IdxMapping(
            CGF,
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
            RValue::get(IVal));
        LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
      }
      PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
      Address RHSAddr = Address::invalid();
      {
        llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
        CodeGenFunction::OpaqueValueMapping IdxMapping(
            CGF,
            cast<OpaqueValueExpr>(
                cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
            RValue::get(OffsetIVal));
        RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
      }
      PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
      ++ILHS;
      ++IRHS;
    }
    PrivScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitReduction(
        CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
        {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
  }
  llvm::Value *NextIVal =
      CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
  IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
  CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
  CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
  CGF.EmitBlock(InnerExitBB);
  llvm::Value *Next =
      CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
  Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
  // pow2k <<= 1;
  llvm::Value *NextPow2K =
      CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
  Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
  llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
  CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
  auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
  CGF.EmitBlock(ExitBB);

  CGF.OMPFirstScanLoop = false;
  SecondGen(CGF);
}
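// For reference, the two-pass scheme above handles user code such as the
// following (an assumed example, using '+' as the reduction operation):
//
//   int red = 0;
//   #pragma omp for reduction(inscan, +: red)
//   for (int i = 0; i < n; ++i) {
//     red += in[i];                  // <input phase>
//     #pragma omp scan inclusive(red)
//     out[i] = red;                  // <scan phase>
//   }
//
// FirstGen emits the loop with only the input phase active; SecondGen then
// re-emits it with the scan phase reading the prefix-reduced buffer.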
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
                     [](const OMPReductionClause *C) {
                       return C->getModifier() == OMPC_REDUCTION_inscan;
                     })) {
      const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
        OMPLocalDeclMapRAII Scope(CGF);
        OMPLoopScope LoopScope(CGF, S);
        return CGF.EmitScalarExpr(S.getNumIterations());
      };
      const auto &&FirstGen = [&S](CodeGenFunction &CGF) {
        OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
        (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                         emitForLoopBounds,
                                         emitDispatchForLoopBounds);
        // Emit an implicit barrier at the end.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
                                                   OMPD_for);
      };
      const auto &&SecondGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
        OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
        HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                     emitForLoopBounds,
                                                     emitDispatchForLoopBounds);
      };
      emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
    } else {
      OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
      HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                   emitForLoopBounds,
                                                   emitDispatchForLoopBounds);
    }
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
                                                S.hasCancel());
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
                     [](const OMPReductionClause *C) {
                       return C->getModifier() == OMPC_REDUCTION_inscan;
                     })) {
      const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
        OMPLocalDeclMapRAII Scope(CGF);
        OMPLoopScope LoopScope(CGF, S);
        return CGF.EmitScalarExpr(S.getNumIterations());
      };
      const auto &&FirstGen = [&S](CodeGenFunction &CGF) {
        (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                         emitForLoopBounds,
                                         emitDispatchForLoopBounds);
        // Emit an implicit barrier at the end.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
                                                   OMPD_for);
      };
      const auto &&SecondGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
        HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                     emitForLoopBounds,
                                                     emitDispatchForLoopBounds);
      };
      emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
    } else {
      HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                   emitForLoopBounds,
                                                   emitDispatchForLoopBounds);
    }
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
  return LVal;
}

void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, CapturedStmt, CS,
                    &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
    const ASTContext &C = CGF.getContext();
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit helper vars inits.
    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                  CGF.Builder.getInt32(0));
    llvm::ConstantInt *GlobalUBVal = CS != nullptr
                                         ? CGF.Builder.getInt32(CS->size() - 1)
                                         : CGF.Builder.getInt32(0);
    LValue UB =
        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                  CGF.Builder.getInt32(1));
    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                  CGF.Builder.getInt32(0));
    // Loop counter.
    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
    OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
    // Generate condition for loop.
    BinaryOperator *Cond = BinaryOperator::Create(
        C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary,
        S.getBeginLoc(), FPOptions(C.getLangOpts()));
    // Increment for loop counter.
    UnaryOperator *Inc = UnaryOperator::Create(
        C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
        S.getBeginLoc(), true, FPOptions(C.getLangOpts()));
    auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
      // Iterate through all sections and emit a switch construct:
      // switch (IV) {
      //   case 0:
      //     <SectionStmt[0]>;
      //     break;
      // ...
      //   case <NumSection> - 1:
      //     <SectionStmt[<NumSection> - 1]>;
      //     break;
      // }
      // .omp.sections.exit:
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
      llvm::SwitchInst *SwitchStmt =
          CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
                                   ExitBB, CS == nullptr ? 1 : CS->size());
      if (CS) {
        unsigned CaseNumber = 0;
        for (const Stmt *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
      } else {
        llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
        CGF.EmitStmt(CapturedStmt);
        CGF.EmitBranch(ExitBB);
      }
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    };

    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and post-update of
      // lastprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    // Emit static non-chunked loop.
    OpenMPScheduleTy ScheduleKind;
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGOpenMPRuntime::StaticRTInput StaticInit(
        /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false,
        IL.getAddress(CGF), LB.getAddress(CGF), UB.getAddress(CGF),
        ST.getAddress(CGF));
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
        CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
    // UB = min(UB, GlobalUB);
    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    // IV = LB;
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
    // while (idx <= UB) { BODY; ++idx; }
    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
                         [](CodeGenFunction &) {});
    // Tell the runtime we are done.
    auto &&CodeGen = [&S](CodeGenFunction &CGF) {
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
    };
    CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
    // Emit post-update of the reduction variables if IsLastIter != 0.
    emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });

    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivates)
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*NoFinals=*/false,
          CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
  };

  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
                                              HasCancel);
  // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
  // clause. Otherwise the barrier will be generated by the codegen for the
  // directive.
  if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_unknown);
  }
}

void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    EmitSections(S);
  }
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>()) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_sections);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
                                              S.hasCancel());
}

void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
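  // For illustration (assumed user code): given
  //
  //   #pragma omp single copyprivate(x)
  //   x = compute();
  //
  // the value of x produced by the one thread that executes the 'single'
  // region is broadcast to the copies of x in all other threads of the team;
  // the helper expressions gathered below implement that copy.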
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions)
  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  // Emit code for 'single' region along with 'copyprivate' clauses.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope SingleScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
                                            CopyprivateVars, DestExprs,
                                            SrcExprs, AssignmentOps);
  }
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getBeginLoc(),
        S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt();

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP,
                                                  llvm::BasicBlock &FiniBB) {
      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
                                             CodeGenIP, FiniBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    Builder.restoreIP(OMPBuilder->CreateMaster(Builder, BodyGenCB, FiniCB));

    return;
  }
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  emitMaster(*this, S);
}

void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) {
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt();
    const Expr *Hint = nullptr;
    if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
      Hint = HintClause->getHint();

    // TODO: This is slightly different from what's currently being done in
    // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
    // about typing is final.
    llvm::Value *HintInst = nullptr;
    if (Hint)
      HintInst =
          Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                    InsertPointTy CodeGenIP,
                                                    llvm::BasicBlock &FiniBB) {
      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
                                             CodeGenIP, FiniBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    Builder.restoreIP(OMPBuilder->CreateCritical(
        Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
        HintInst));

    return;
  }

  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  const Expr *Hint = nullptr;
  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getBeginLoc(), Hint);
}

void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelMasterDirective(
    const OMPParallelMasterDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'master' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // propagation of the master thread's values of threadprivate variables
      // to the local instances of those variables in all other implicit
      // threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    emitMaster(CGF, S);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitSections(S);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPTaskBasedDirective(
    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
    OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  // Check if the task is final.
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid
    // emitting the condition and the dead arm of the if/else.
    const Expr *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
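  // For example (assumed source), '#pragma omp task final(n < 16)' makes the
  // generated task final whenever n < 16 holds at runtime; tasks encountered
  // inside a final task are then executed immediately by the encountering
  // thread instead of being deferred.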
  // Check if the task has 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(/*IntVal=*/true);
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied task).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef;
      ++IElemInitRef;
    }
  }
  // Get list of lastprivate variables (for taskloops).
  llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
      }
      LastprivateDstsOrigs.insert(
          {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
           cast<DeclRefExpr>(*IRef)});
      ++IRef;
      ++ID;
    }
  }
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
    Data.ReductionOps.append(C->reduction_ops().begin(),
                             C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
  }
  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
      *this, S.getBeginLoc(), LHSs, RHSs, Data);
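  // Each depend clause gathered below, e.g. from the assumed source
  // '#pragma omp task depend(inout: a[0:N])', is eventually lowered to a
  // dependence record (in libomp a kmp_depend_info: base address, length and
  // in/out flags) that the task-creation call uses to order sibling tasks.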
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  }
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
                    CapturedRegion](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.PrivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        FirstprivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      for (const Expr *E : Data.LastprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : LastprivateDstsOrigs) {
        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
        DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        Pair.second->getType(), VK_LValue,
                        Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    if (Data.Reductions) {
      OMPPrivateScope FirstprivateScope(CGF);
      for (const auto &Pair : FirstprivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        FirstprivateScope.addPrivate(Pair.first,
                                     [Replacement]() { return Replacement; });
      }
      (void)FirstprivateScope.Privatize();
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
                             Data.ReductionCopies, Data.ReductionOps);
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement =
            Address(CGF.EmitScalarConversion(
                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                        CGF.getContext().getPointerType(
                            Data.ReductionCopies[Cnt]->getType()),
                        Data.ReductionCopies[Cnt]->getExprLoc()),
                    Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt),
                         [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicit firstprivate
        // and privatized already during processing of the firstprivates.
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        llvm::Value *ReductionsPtr;
        if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
          ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
                                               TRExpr->getExprLoc());
        } else {
          ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
        }
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
                              [Replacement]() { return Replacement; });
      }
    }
    (void)InRedScope.Privatize();

    Action.Enter(CGF);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S, llvm::None,
                        !isOpenMPParallelDirective(S.getDirectiveKind()) &&
                            !isOpenMPSimdDirective(S.getDirectiveKind()));
  TaskGen(*this, OutlinedFn, Data);
}

static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
                                  QualType Ty, CapturedDecl *CD,
                                  SourceLocation Loc) {
  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                           ImplicitParamDecl::Other);
  auto *OrigRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                              ImplicitParamDecl::Other);
  auto *PrivateRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  QualType ElemType = C.getBaseElementType(Ty);
  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
                                           ImplicitParamDecl::Other);
  auto *InitRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
  PrivateVD->setInitStyle(VarDecl::CInit);
  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
                                              InitRef, /*BasePath=*/nullptr,
                                              VK_RValue));
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivateRef);
  Data.FirstprivateInits.emplace_back(InitRef);
  return OrigVD;
}

void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
    OMPTargetDataInfo &InputInfo) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  OMPTaskDataTy Data;
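  // The code below forwards the offloading arguments into the task: the
  // base-pointers, pointers and sizes arrays prepared for the target call
  // are registered as implicit firstprivate arrays (BPVD/PVD/SVD), so a
  // deferred target task operates on stable private copies of them.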
  // The task is not final.
  Data.Final.setInt(/*IntVal=*/false);
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
      ++IRef;
      ++IElemInitRef;
    }
  }
  OMPPrivateScope TargetScope(*this);
  VarDecl *BPVD = nullptr;
  VarDecl *PVD = nullptr;
  VarDecl *SVD = nullptr;
  if (InputInfo.NumberOfTargetItems > 0) {
    auto *CD = CapturedDecl::Create(
        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
    QualType BaseAndPointersType = getContext().getConstantArrayType(
        getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    BPVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
    PVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
    QualType SizesType = getContext().getConstantArrayType(
        getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
        ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
                                            S.getBeginLoc());
    TargetScope.addPrivate(
        BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
    TargetScope.addPrivate(PVD,
                           [&InputInfo]() { return InputInfo.PointersArray; });
    TargetScope.addPrivate(SVD,
                           [&InputInfo]() { return InputInfo.SizesArray; });
  }
  (void)TargetScope.Privatize();
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  }
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD,
                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
          CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
      }
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    if (InputInfo.NumberOfTargetItems > 0) {
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
    }

    Action.Enter(CGF);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
      Data.NumberOfParts);
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
  IntegerLiteral IfCond(getContext(), TrueOrFalse,
                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
                        SourceLocation());

  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
                                      SharedsTy, CapturedStruct, &IfCond, Data);
}

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
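  // For example (assumed source), '#pragma omp task untied' clears Data.Tied
  // below; an untied task may be resumed after a task scheduling point by any
  // thread of the team, not just the thread that started it, which is why the
  // part id parameter is only meaningful for untied tasks.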
  // Check if we should emit tied or untied task.
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
                                            Data);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    if (const Expr *E = S.getReductionRef()) {
      SmallVector<const Expr *, 4> LHSs;
      SmallVector<const Expr *, 4> RHSs;
      OMPTaskDataTy Data;
      for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
        Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionCopies.append(C->privates().begin(),
                                    C->privates().end());
        Data.ReductionOps.append(C->reduction_ops().begin(),
                                 C->reduction_ops().end());
        LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
        RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
      }
      llvm::Value *ReductionDesc =
          CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(
              CGF, S.getBeginLoc(), LHSs, RHSs, Data);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      CGF.EmitVarDecl(*VD);
      CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
                            /*Volatile=*/false, E->getType());
    }
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
                                ? llvm::AtomicOrdering::NotAtomic
                                : llvm::AtomicOrdering::AcquireRelease;
  CGM.getOpenMPRuntime().emitFlush(
      *this,
      [&S]() -> ArrayRef<const Expr *> {
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
          return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                    FlushClause->varlist_end());
        return llvm::None;
      }(),
      S.getBeginLoc(), AO);
}

void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
  const auto *DO = S.getSingleClause<OMPDepobjClause>();
  LValue DOLVal = EmitLValue(DO->getDepobj());
  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
    OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
                                           DC->getModifier());
    Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
    Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
        *this, Dependencies, DC->getBeginLoc());
    EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
    return;
  }
  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
    CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
    return;
  }
  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
    CGM.getOpenMPRuntime().emitUpdateClause(
        *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
    return;
  }
}

void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
  if (!OMPParentLoopDirectiveForScan)
    return;
  const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
  bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
    if (C->getModifier() != OMPC_REDUCTION_inscan)
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  if (ParentDir.getDirectiveKind() == OMPD_simd ||
      (getLangOpts().OpenMPSimd &&
       isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
    // For the simd directive, and for simd-based directives in simd-only
    // mode, use the following codegen:
    //   int x = 0;
    //   #pragma omp simd reduction(inscan, +: x)
    //   for (..) {
    //     <first part>
    //     #pragma omp scan inclusive(x)
    //     <second part>
    //   }
    // is transformed to:
    //   int x = 0;
    //   for (..) {
    //     int x_priv = 0;
    //     <first part>
    //     x = x_priv + x;
    //     x_priv = x;
    //     <second part>
    //   }
    // and
    //   int x = 0;
    //   #pragma omp simd reduction(inscan, +: x)
    //   for (..)
    //   {
    //     <first part>
    //     #pragma omp scan exclusive(x)
    //     <second part>
    //   }
    // to
    //   int x = 0;
    //   for (..) {
    //     int x_priv = 0;
    //     <second part>
    //     int temp = x;
    //     x = x_priv + x;
    //     x_priv = temp;
    //     <first part>
    //   }
    llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
    EmitBranch(IsInclusive
                   ? OMPScanReduce
                   : BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanDispatch);
    {
      // New scope for correct construction/destruction of temp variables for
      // exclusive scan.
      LexicalScope Scope(*this, S.getSourceRange());
      EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
      EmitBlock(OMPScanReduce);
      if (!IsInclusive) {
        // Create a temp var and copy the LHS value to this temp value.
        // TMP = LHS;
        for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
          const Expr *PrivateExpr = Privates[I];
          const Expr *TempExpr = CopyArrayTemps[I];
          EmitAutoVarDecl(
              *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
          LValue DestLVal = EmitLValue(TempExpr);
          LValue SrcLVal = EmitLValue(LHSs[I]);
          EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                      SrcLVal.getAddress(*this),
                      cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                      cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                      CopyOps[I]);
        }
      }
      CGM.getOpenMPRuntime().emitReduction(
          *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
          {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        LValue DestLVal;
        LValue SrcLVal;
        if (IsInclusive) {
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(LHSs[I]);
        } else {
          const Expr *TempExpr = CopyArrayTemps[I];
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(TempExpr);
        }
        EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                    SrcLVal.getAddress(*this),
                    cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                    cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                    CopyOps[I]);
      }
    }
    EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
    OMPScanExitBlock = IsInclusive
                           ? BreakContinueStack.back().ContinueBlock.getBlock()
                           : OMPScanReduce;
    EmitBlock(OMPAfterScanBlock);
    return;
  }
  if (!IsInclusive) {
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanExitBlock);
  }
  if (OMPFirstScanLoop) {
    // Emit buffer[i] = red; at the end of the input phase.
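    // A sketch of the two-pass lowering used here (assuming a temporary
    // buffer sized to the iteration count):
    //   first loop (input phase):  buffer[i] = red_priv;
    //   <prefix sums are combined over buffer between the two passes>
    //   second loop (scan phase):  red_priv = buffer[i];
    //                              (buffer[i-1] for an exclusive scan)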
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue DestLVal = EmitLValue(CopyArrayElem);
      LValue SrcLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
  }
  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  if (IsInclusive) {
    EmitBlock(OMPScanExitBlock);
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  }
  EmitBlock(OMPScanDispatch);
  if (!OMPFirstScanLoop) {
    // Emit red = buffer[i]; at the entrance to the scan phase.
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    llvm::BasicBlock *ExclusiveExitBB = nullptr;
    if (!IsInclusive) {
      llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
      ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
      llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
      Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
      EmitBlock(ContBB);
      // Use the idx - 1 iteration for exclusive scan.
      IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
    }
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue SrcLVal = EmitLValue(CopyArrayElem);
      LValue DestLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
    if (!IsInclusive) {
      EmitBlock(ExclusiveExitBB);
    }
  }
  EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
                                               : OMPAfterScanBlock);
  EmitBlock(OMPAfterScanBlock);
}

void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
                                            const CodeGenLoopTy &CodeGenLoop,
                                            Expr *IncExpr) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause = false;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.

      LValue LB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedLowerBoundVariable()
                          : S.getLowerBoundVariable())));
      LValue UB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedUpperBoundVariable()
                          : S.getUpperBoundVariable())));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit an implicit barrier to synchronize threads and avoid data
        // races on initialization of firstprivate variables and post-update
        // of lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind()))
        EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const Expr *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getBeginLoc());
        }
      } else {
        // Default behaviour for the dist_schedule clause.
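        // For example (a sketch):
        //   #pragma omp distribute dist_schedule(static, 4)
        // takes the branch above with ScheduleKind == OMPC_DIST_SCHEDULE_static
        // and Chunk holding the emitted value 4; with no clause, the target
        // hook below picks the default (typically static, one roughly equal
        // chunk per team).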
        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
            *this, S, ScheduleKind, Chunk);
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      bool StaticChunked =
          RT.isStaticChunked(ScheduleKind, /*Chunked=*/Chunk != nullptr) &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if (RT.isStaticNonchunked(ScheduleKind,
                                /*Chunked=*/Chunk != nullptr) ||
          StaticChunked) {
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, /*Ordered=*/false, IL.getAddress(*this),
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            StaticChunked ? Chunk : nullptr);
        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                    StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedInit()
                            : S.getInit());

        const Expr *Cond =
            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                ? S.getCombinedCond()
                : S.getCond();

        if (StaticChunked)
          Cond = S.getCombinedDistCond();

        // For static unchunked schedules generate:
        //
        // 1. For distribute alone, codegen
        //    while (idx <= UB) {
        //      BODY;
        //      ++idx;
        //    }
        //
        // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
        //    while (idx <= UB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      idx += ST;
        //    }
        //
        // For static chunked schedules generate:
        //
        //    while (IV <= GlobalUB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      LB += ST;
        //      UB += ST;
        //      UB = min(UB, GlobalUB);
        //      IV = LB;
        //    }
        //
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind()))
                CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
            },
            [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
             StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(), Cond, IncExpr,
                  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
                    CodeGenLoop(CGF, S, LoopExit);
                  },
                  [&S, StaticChunked](CodeGenFunction &CGF) {
                    if (StaticChunked) {
                      CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                      CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedInit());
                    }
                  });
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
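        // A sketch of the emitted call, assuming the libomp entry point:
        //   __kmpc_for_static_fini(&loc, tid);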
        RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // the runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments = {
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk};
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                   CodeGenLoop);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
        EmitOMPReductionClauseFinal(S, OMPD_simd);
        // Emit post-update of the reduction variables if IsLastIter != 0.
        emitPostUpdateForReductionClause(
            *this, S, [IL, &S](CodeGenFunction &CGF) {
              return CGF.Builder.CreateIsNotNull(
                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
            });
      }
      // Emit the final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause) {
        EmitOMPLastprivateClauseFinal(
            S, /*NoFinals=*/false,
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      }
    }

    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
}

void CodeGenFunction::EmitOMPDistributeDirective(
    const OMPDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S,
                                                   SourceLocation Loc) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  CGF.CapturedStmtInfo = &CapStmtInfo;
  llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
  Fn->setDoesNotRecurse();
  return Fn;
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  if (S.hasClausesOfKind<OMPDependClause>()) {
    assert(!S.getAssociatedStmt() &&
           "'ordered depend' construct must not have an associated statement.");
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
    return;
  }
  const auto *C = S.getSingleClause<OMPSIMDClause>();
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
                                 PrePostActionTy &Action) {
    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    if (C) {
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      llvm::Function *OutlinedFn =
          emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
                                                      OutlinedFn, CapturedVars);
    } else {
      Action.Enter(CGF);
      CGF.EmitStmt(CS->getCapturedStmt());
    }
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
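  // The last argument tells the runtime whether to serialize the region: with
  // a 'simd' clause (C != nullptr) the body is just a direct call to the
  // outlined function; otherwise the region is bracketed by runtime calls,
  // e.g. __kmpc_ordered(&loc, tid) / __kmpc_end_ordered(&loc, tid) (a sketch,
  // assuming the libomp entry points).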
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(),
                                           !C);
}

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                                   DestType, Loc)
                        : CGF.EmitComplexToScalarConversion(
                              Val.getComplexVal(), SrcType, DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    llvm::Value *ScalarVal = CGF.EmitScalarConversion(
        Val.getScalarVal(), SrcType, DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg())
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  else
    CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
}

static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, LValue LVal,
                                   SourceLocation Loc) {
  if (LVal.isGlobalReg())
    return CGF.EmitLoadOfLValue(LVal, Loc);
  return CGF.EmitAtomicLoad(
      LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
      LVal.isVolatile());
}

void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
                                         QualType RValTy, SourceLocation Loc) {
  switch (getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                               *this, RVal, RValTy, LVal.getType(), Loc)),
                           LVal);
    break;
  case TEK_Complex:
    EmitStoreOfComplex(
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}
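
// A sketch of the read lowering below: for
//   #pragma omp atomic read acquire
//   v = x;
// 'x' is loaded with an atomic load (acquire), the value is converted to the
// type of 'v' if needed, and then stored to 'v' with a plain store.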
static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
  // OpenMP, 2.17.7, atomic Construct
  // If the read or capture clause is specified and the acquire, acq_rel, or
  // seq_cst clause is specified then the strong flush on exit from the atomic
  // operation is also an acquire flush.
  switch (AO) {
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Acquire);
    break;
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::Release:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(),
                         Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
}

static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, const Expr *X,
                                   const Expr *E, SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  ASTContext &Context = CGF.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for the 'x' expression is simple, and atomics are supported for the given
  // type on the target platform.
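  // For instance (a sketch): 'x += n' on a 32-bit integer 'x' lowers to a
  // single 'atomicrmw add i32' instruction, while a floating-point or
  // mismatched-width update fails the checks below and takes the
  // compare-and-swap path in EmitOMPAtomicSimpleUpdateExpr instead.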
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
      (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
       (Update.getScalarVal()->getType() !=
        X.getAddress(CGF).getElementType())) ||
      !X.getAddress(CGF).getElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  llvm::Value *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress(CGF).getElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  llvm::Value *Res =
      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> CommonGen) {
  // Update expressions are allowed to have the following forms:
  //   x binop= expr;    -> xrval binop expr;
  //   x++, ++x          -> xrval + 1;
  //   x--, --x          -> xrval - 1;
  //   x = x binop expr; -> xrval binop expr;
  //   x = expr Op x;    -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform the compare-and-swap procedure.
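      // A sketch of the loop emitted by EmitAtomicUpdate:
      //   old = atomic load of x
      //   do {
      //     new = CommonGen(old);  // evaluate the update expression
      //   } while (!atomic_compare_exchange(x, &old, new, AO));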
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
                                    llvm::AtomicOrdering AO, const Expr *X,
                                    const Expr *E, const Expr *UE,
                                    bool IsXLHSInRHSPart, SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  //   x binop= expr;    -> xrval binop expr;
  //   x++, ++x          -> xrval + 1;
  //   x--, --x          -> xrval - 1;
  //   x = x binop expr; -> xrval binop expr;
  //   x = expr Op x;    -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType,
                            SourceLocation Loc) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(
        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
                                     llvm::AtomicOrdering AO,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    //   x binop= expr;    -> xrval binop expr;
    //   x++, ++x          -> xrval + 1;
    //   x--, --x          -> xrval - 1;
    //   x = x binop expr; -> xrval binop expr;
    //   x = expr Op x;    -> expr binop xrval;
    const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsPostfixUpdate](RValue XRValue) {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
    if (Res.first) {
      // An 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use the old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide the new value, so evaluate it using
        // the old value of 'x'.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise a simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
    if (Res.first) {
      // An 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit the post-update store to 'v' of the old/new 'x' value.
  CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  // If the read or capture clause is specified and the acquire, acq_rel, or
  // seq_cst clause is specified then the strong flush on exit from the atomic
  // operation is also an acquire flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Acquire);
    break;
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(
        CGF, llvm::None, Loc, llvm::AtomicOrdering::AcquireRelease);
    break;
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              llvm::AtomicOrdering AO, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
    break;
  case OMPC_write:
    emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_task_reduction:
  case OMPC_in_reduction:
  case OMPC_safelen:
  case OMPC_simdlen:
  case OMPC_allocator:
  case OMPC_allocate:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_acq_rel:
  case OMPC_acquire:
  case OMPC_release:
  case OMPC_relaxed:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_depobj:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
  case OMPC_threads:
  case OMPC_simd:
  case OMPC_map:
  case OMPC_num_teams:
  case OMPC_thread_limit:
  case OMPC_priority:
  case OMPC_grainsize:
  case OMPC_nogroup:
  case OMPC_num_tasks:
  case OMPC_hint:
  case OMPC_dist_schedule:
  case OMPC_defaultmap:
  case OMPC_uniform:
  case OMPC_to:
  case OMPC_from:
  case OMPC_use_device_ptr:
  case OMPC_use_device_addr:
  case OMPC_is_device_ptr:
  case OMPC_unified_address:
  case OMPC_unified_shared_memory:
  case OMPC_reverse_offload:
  case OMPC_dynamic_allocators:
  case OMPC_atomic_default_mem_order:
  case OMPC_device_type:
  case OMPC_match:
  case OMPC_nontemporal:
  case OMPC_order:
  case OMPC_destroy:
  case OMPC_detach:
  case OMPC_inclusive:
  case OMPC_exclusive:
  case OMPC_uses_allocators:
  case OMPC_affinity:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
  bool MemOrderingSpecified = false;
  if (S.getSingleClause<OMPSeqCstClause>()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcqRelClause>()) {
    AO = llvm::AtomicOrdering::AcquireRelease;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcquireClause>()) {
    AO = llvm::AtomicOrdering::Acquire;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPReleaseClause>()) {
    AO = llvm::AtomicOrdering::Release;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPRelaxedClause>()) {
    AO = llvm::AtomicOrdering::Monotonic;
    MemOrderingSpecified = true;
  }
  OpenMPClauseKind Kind = OMPC_unknown;
  for (const OMPClause *C : S.clauses()) {
    // Find the first clause, skipping a seq_cst, acq_rel, acquire, release,
    // or relaxed clause if it comes first.
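    // E.g. (a sketch): for '#pragma omp atomic capture seq_cst' the ordering
    // clause is skipped and Kind becomes OMPC_capture.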
    if (C->getClauseKind() != OMPC_seq_cst &&
        C->getClauseKind() != OMPC_acq_rel &&
        C->getClauseKind() != OMPC_acquire &&
        C->getClauseKind() != OMPC_release &&
        C->getClauseKind() != OMPC_relaxed) {
      Kind = C->getClauseKind();
      break;
    }
  }
  if (!MemOrderingSpecified) {
    llvm::AtomicOrdering DefaultOrder =
        CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
    if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
        DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
        (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
         Kind == OMPC_capture)) {
      AO = DefaultOrder;
    } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
      if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
        AO = llvm::AtomicOrdering::Release;
      } else if (Kind == OMPC_read) {
        assert(Kind == OMPC_read && "Unexpected atomic kind.");
        AO = llvm::AtomicOrdering::Acquire;
      }
    }
  }

  const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();

  auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
                                      PrePostActionTy &) {
    CGF.EmitStopPoint(CS);
    emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
                      S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
                      S.getBeginLoc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen) {
  assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
  CodeGenModule &CGM = CGF.CGM;

  // On the device, emit this construct as inlined code.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    OMPLexicalScope Scope(CGF, S, OMPD_target);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
        });
    return;
  }

  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  const Expr *IfCond = nullptr;
  // Check for the at-most-one 'if' clause associated with the target region.
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_target) {
      IfCond = C->getCondition();
      break;
    }
  }

  // Check if we have any device clause associated with the directive.
  llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
      nullptr, OMPC_DEVICE_unknown);
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device.setPointerAndInt(C->getDevice(), C->getModifier());

  // Check if we have an 'if' clause whose condition always evaluates to
  // false, or if we do not have any targets specified. If so, the target
  // region is not an offload entry point.
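  // E.g. (a sketch): '#pragma omp target if(0)' is known to run on the host,
  // and a build with an empty offload-targets list has nowhere to offload to,
  // so no offload entry is registered in either case.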
  bool IsOffloadEntry = true;
  if (IfCond) {
    bool Val;
    if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
      IsOffloadEntry = false;
  }
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    IsOffloadEntry = false;

  assert(CGF.CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  // In case we have Ctors/Dtors we use the complete type variant to produce
  // the mangling of the device outlined kernel.
  if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
  else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
  else
    ParentName =
        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));

  // Emit the target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
                                                    IsOffloadEntry, CodeGen);
  OMPLexicalScope Scope(CGF, S, OMPD_task);
  auto &&SizeEmitter =
      [IsOffloadEntry](CodeGenFunction &CGF,
                       const OMPLoopDirective &D) -> llvm::Value * {
    if (IsOffloadEntry) {
      OMPLoopScope(CGF, D);
      // Emit calculation of the iterations count.
      llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
      NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
                                                /*isSigned=*/false);
      return NumIterations;
    }
    return nullptr;
  };
  CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
                                        SizeEmitter);
}

static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
                             PrePostActionTy &Action) {
  Action.Enter(CGF);
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
  CGF.EmitOMPPrivateClause(S, PrivateScope);
  (void)PrivateScope.Privatize();
  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
    CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

  CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
}

void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
                                                  StringRef ParentName,
                                                  const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit the target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        OpenMPDirectiveKind InnermostKind,
                                        const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);

  const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
  const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
  if (NT || TL) {
    const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
    const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;

    CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
                                                  S.getBeginLoc());
  }

  OMPTeamsScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
                                           CapturedVars);
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

static void emitTargetTeamsRegion(CodeGenFunction &CGF,
                                  PrePostActionTy &Action,
                                  const OMPTargetTeamsDirective &S) {
  auto *CS = S.getCapturedStmt(OMPD_teams);
  Action.Enter(CGF);
  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit the target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDirective(
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void
emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
                                const OMPTargetTeamsDistributeDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit the target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetTeamsDistributeSimdRegion(
    CodeGenFunction &CGF, PrePostActionTy &Action,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit the target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPTeamsDistributeDirective(
    const OMPTeamsDistributeDirective &S) {

  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
    const OMPTeamsDistributeSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
    const OMPTeamsDistributeParallelForDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit the teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
    const OMPTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit the teams region as a standalone region.
5667 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5668 PrePostActionTy &Action) { 5669 Action.Enter(CGF); 5670 OMPPrivateScope PrivateScope(CGF); 5671 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5672 (void)PrivateScope.Privatize(); 5673 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5674 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5675 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5676 }; 5677 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 5678 CodeGen); 5679 emitPostUpdateForReductionClause(*this, S, 5680 [](CodeGenFunction &) { return nullptr; }); 5681 } 5682 5683 static void emitTargetTeamsDistributeParallelForRegion( 5684 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 5685 PrePostActionTy &Action) { 5686 Action.Enter(CGF); 5687 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5688 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5689 S.getDistInc()); 5690 }; 5691 5692 // Emit teams region as a standalone region. 5693 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5694 PrePostActionTy &Action) { 5695 Action.Enter(CGF); 5696 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5697 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5698 (void)PrivateScope.Privatize(); 5699 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5700 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5701 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5702 }; 5703 5704 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 5705 CodeGenTeams); 5706 emitPostUpdateForReductionClause(CGF, S, 5707 [](CodeGenFunction &) { return nullptr; }); 5708 } 5709 5710 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 5711 CodeGenModule &CGM, StringRef ParentName, 5712 const OMPTargetTeamsDistributeParallelForDirective &S) { 5713 // Emit SPMD target teams distribute parallel for region as a standalone 5714 // region. 5715 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5716 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5717 }; 5718 llvm::Function *Fn; 5719 llvm::Constant *Addr; 5720 // Emit target region as a standalone region. 5721 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5722 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5723 assert(Fn && Addr && "Target device function emission failed."); 5724 } 5725 5726 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 5727 const OMPTargetTeamsDistributeParallelForDirective &S) { 5728 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5729 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5730 }; 5731 emitCommonOMPTargetDirective(*this, S, CodeGen); 5732 } 5733 5734 static void emitTargetTeamsDistributeParallelForSimdRegion( 5735 CodeGenFunction &CGF, 5736 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 5737 PrePostActionTy &Action) { 5738 Action.Enter(CGF); 5739 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5740 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5741 S.getDistInc()); 5742 }; 5743 5744 // Emit teams region as a standalone region. 
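// A hypothetical source-level form of the combined construct lowered by this
// region (the array-section bounds are illustrative only):
//   #pragma omp target teams distribute parallel for simd map(tofrom : a[0:n])
//   for (int i = 0; i < n; ++i)
//     a[i] *= 2;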
5745 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5746 PrePostActionTy &Action) { 5747 Action.Enter(CGF); 5748 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5749 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5750 (void)PrivateScope.Privatize(); 5751 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5752 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5753 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5754 }; 5755 5756 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 5757 CodeGenTeams); 5758 emitPostUpdateForReductionClause(CGF, S, 5759 [](CodeGenFunction &) { return nullptr; }); 5760 } 5761 5762 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 5763 CodeGenModule &CGM, StringRef ParentName, 5764 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5765 // Emit SPMD target teams distribute parallel for simd region as a standalone 5766 // region. 5767 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5768 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5769 }; 5770 llvm::Function *Fn; 5771 llvm::Constant *Addr; 5772 // Emit target region as a standalone region. 5773 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5774 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5775 assert(Fn && Addr && "Target device function emission failed."); 5776 } 5777 5778 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 5779 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5780 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5781 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5782 }; 5783 emitCommonOMPTargetDirective(*this, S, CodeGen); 5784 } 5785 5786 void CodeGenFunction::EmitOMPCancellationPointDirective( 5787 const OMPCancellationPointDirective &S) { 5788 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 5789 S.getCancelRegion()); 5790 } 5791 5792 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 5793 const Expr *IfCond = nullptr; 5794 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5795 if (C->getNameModifier() == OMPD_unknown || 5796 C->getNameModifier() == OMPD_cancel) { 5797 IfCond = C->getCondition(); 5798 break; 5799 } 5800 } 5801 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5802 // TODO: This check is necessary as we only generate `omp parallel` through 5803 // the OpenMPIRBuilder for now. 
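// A minimal sketch (hypothetical user code) of a cancellation that takes
// this OpenMPIRBuilder path:
//   #pragma omp parallel
//   {
//     if (error_found) {
//       #pragma omp cancel parallel
//     }
//   }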
5804 if (S.getCancelRegion() == OMPD_parallel) {
5805 llvm::Value *IfCondition = nullptr;
5806 if (IfCond)
5807 IfCondition = EmitScalarExpr(IfCond,
5808 /*IgnoreResultAssign=*/true);
5809 return Builder.restoreIP(
5810 OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
5811 }
5812 }
5813
5814 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
5815 S.getCancelRegion());
5816 }
5817
5818 CodeGenFunction::JumpDest
5819 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
5820 if (Kind == OMPD_parallel || Kind == OMPD_task ||
5821 Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
5822 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
5823 return ReturnBlock;
5824 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
5825 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
5826 Kind == OMPD_distribute_parallel_for ||
5827 Kind == OMPD_target_parallel_for ||
5828 Kind == OMPD_teams_distribute_parallel_for ||
5829 Kind == OMPD_target_teams_distribute_parallel_for);
5830 return OMPCancelStack.getExitBlock();
5831 }
5832
5833 void CodeGenFunction::EmitOMPUseDevicePtrClause(
5834 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
5835 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5836 auto OrigVarIt = C.varlist_begin();
5837 auto InitIt = C.inits().begin();
5838 for (const Expr *PvtVarIt : C.private_copies()) {
5839 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
5840 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
5841 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
5842
5843 // In order to identify the right initializer we need to match the
5844 // declaration used by the mapping logic. In some cases we may get an
5845 // OMPCapturedExprDecl that refers to the original declaration.
5846 const ValueDecl *MatchingVD = OrigVD;
5847 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5848 // OMPCapturedExprDecls are used to privatize fields of the current
5849 // structure.
5850 const auto *ME = cast<MemberExpr>(OED->getInit());
5851 assert(isa<CXXThisExpr>(ME->getBase()) &&
5852 "Base should be the current struct!");
5853 MatchingVD = ME->getMemberDecl();
5854 }
5855
5856 // If we don't have information about the current list item, move on to
5857 // the next one.
5858 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5859 if (InitAddrIt == CaptureDeviceAddrMap.end())
5860 continue;
5861
5862 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
5863 InitAddrIt, InitVD,
5864 PvtVD]() {
5865 // Initialize the temporary initialization variable with the address we
5866 // get from the runtime library. We have to cast the source address
5867 // because it is always a void *. References are materialized in the
5868 // privatization scope, so the initialization here disregards the fact
5869 // that the original variable is a reference.
5870 QualType AddrQTy =
5871 getContext().getPointerType(OrigVD->getType().getNonReferenceType());
5872 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
5873 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
5874 setAddrOfLocalVar(InitVD, InitAddr);
5875
5876 // Emit the private declaration; it will be initialized by the
5877 // declaration we just added to the local declarations map.
5878 EmitDecl(*PvtVD);
5879
5880 // The initialization variable has served its purpose in the emission
5881 // of the previous declaration, so we don't need it anymore.
5882 LocalDeclMap.erase(InitVD);
5883
5884 // Return the address of the private variable.
5885 return GetAddrOfLocalVar(PvtVD);
5886 });
5887 assert(IsRegistered && "use_device_ptr var already registered as private");
5888 // Silence the warning about unused variable.
5889 (void)IsRegistered;
5890
5891 ++OrigVarIt;
5892 ++InitIt;
5893 }
5894 }
5895
5896 static const VarDecl *getBaseDecl(const Expr *Ref) {
5897 const Expr *Base = Ref->IgnoreParenImpCasts();
5898 while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
5899 Base = OASE->getBase()->IgnoreParenImpCasts();
5900 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
5901 Base = ASE->getBase()->IgnoreParenImpCasts();
5902 return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
5903 }
5904
5905 void CodeGenFunction::EmitOMPUseDeviceAddrClause(
5906 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
5907 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5908 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
5909 for (const Expr *Ref : C.varlists()) {
5910 const VarDecl *OrigVD = getBaseDecl(Ref);
5911 if (!Processed.insert(OrigVD).second)
5912 continue;
5913 // In order to identify the right initializer we need to match the
5914 // declaration used by the mapping logic. In some cases we may get an
5915 // OMPCapturedExprDecl that refers to the original declaration.
5916 const ValueDecl *MatchingVD = OrigVD;
5917 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5918 // OMPCapturedExprDecls are used to privatize fields of the current
5919 // structure.
5920 const auto *ME = cast<MemberExpr>(OED->getInit());
5921 assert(isa<CXXThisExpr>(ME->getBase()) &&
5922 "Base should be the current struct!");
5923 MatchingVD = ME->getMemberDecl();
5924 }
5925
5926 // If we don't have information about the current list item, move on to
5927 // the next one.
5928 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5929 if (InitAddrIt == CaptureDeviceAddrMap.end())
5930 continue;
5931
5932 Address PrivAddr = InitAddrIt->getSecond();
5933 // For declrefs and variable-length arrays we need to load the pointer for
5934 // correct mapping, since the pointer to the data was passed to the runtime.
5935 if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
5936 MatchingVD->getType()->isArrayType())
5937 PrivAddr =
5938 EmitLoadOfPointer(PrivAddr, getContext()
5939 .getPointerType(OrigVD->getType())
5940 ->castAs<PointerType>());
5941 llvm::Type *RealTy =
5942 ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
5943 ->getPointerTo();
5944 PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
5945
5946 (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
5947 }
5948 }
5949
5950 // Generate the instructions for '#pragma omp target data' directive.
5951 void CodeGenFunction::EmitOMPTargetDataDirective(
5952 const OMPTargetDataDirective &S) {
5953 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
5954
5955 // Create a pre/post action to signal the privatization of the device pointer.
5956 // This action can be replaced by the OpenMP runtime code generation to
5957 // deactivate privatization.
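// E.g. (hypothetical): when privatization is active, 'a' inside the region
// below is replaced by the device pointer obtained from the runtime:
//   #pragma omp target data map(to : a[0:n]) use_device_ptr(a)
//   launch_kernel(a, n); // 'a' is the corresponding device pointer here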
5958 bool PrivatizeDevicePointers = false;
5959 class DevicePointerPrivActionTy : public PrePostActionTy {
5960 bool &PrivatizeDevicePointers;
5961
5962 public:
5963 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
5964 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
5965 void Enter(CodeGenFunction &CGF) override {
5966 PrivatizeDevicePointers = true;
5967 }
5968 };
5969 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
5970
5971 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
5972 CodeGenFunction &CGF, PrePostActionTy &Action) {
5973 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5974 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5975 };
5976
5977 // Codegen that selects whether to generate the privatization code or not.
5978 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
5979 &InnermostCodeGen](CodeGenFunction &CGF,
5980 PrePostActionTy &Action) {
5981 RegionCodeGenTy RCG(InnermostCodeGen);
5982 PrivatizeDevicePointers = false;
5983
5984 // Call the pre-action to change the status of PrivatizeDevicePointers if
5985 // needed.
5986 Action.Enter(CGF);
5987
5988 if (PrivatizeDevicePointers) {
5989 OMPPrivateScope PrivateScope(CGF);
5990 // Emit all instances of the use_device_ptr and use_device_addr clauses.
5991 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
5992 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
5993 Info.CaptureDeviceAddrMap);
5994 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
5995 CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
5996 Info.CaptureDeviceAddrMap);
5997 (void)PrivateScope.Privatize();
5998 RCG(CGF);
5999 } else {
6000 RCG(CGF);
6001 }
6002 };
6003
6004 // Forward the provided action to the privatization codegen.
6005 RegionCodeGenTy PrivRCG(PrivCodeGen);
6006 PrivRCG.setAction(Action);
6007
6008 // Although the body of the region is emitted as an inlined directive, we
6009 // don't use an inline scope, as changes to the references inside the
6010 // region are expected to be visible outside, so we do not privatize them.
6011 OMPLexicalScope Scope(CGF, S);
6012 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
6013 PrivRCG);
6014 };
6015
6016 RegionCodeGenTy RCG(CodeGen);
6017
6018 // If we don't have target devices, don't bother emitting the data mapping
6019 // code.
6020 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
6021 RCG(*this);
6022 return;
6023 }
6024
6025 // Check if we have any if clause associated with the directive.
6026 const Expr *IfCond = nullptr;
6027 if (const auto *C = S.getSingleClause<OMPIfClause>())
6028 IfCond = C->getCondition();
6029
6030 // Check if we have any device clause associated with the directive.
6031 const Expr *Device = nullptr;
6032 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6033 Device = C->getDevice();
6034
6035 // Set the action to signal privatization of device pointers.
6036 RCG.setAction(PrivAction);
6037
6038 // Emit region code.
6039 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
6040 Info);
6041 }
6042
6043 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
6044 const OMPTargetEnterDataDirective &S) {
6045 // If we don't have target devices, don't bother emitting the data mapping
6046 // code.
6047 if (CGM.getLangOpts().OMPTargetTriples.empty())
6048 return;
6049
6050 // Check if we have any if clause associated with the directive.
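// E.g. (hypothetical): both clauses extracted below come from a directive
// such as:
//   #pragma omp target enter data if(n > 0) device(dev_id) map(to : a[0:n])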
6051 const Expr *IfCond = nullptr; 6052 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6053 IfCond = C->getCondition(); 6054 6055 // Check if we have any device clause associated with the directive. 6056 const Expr *Device = nullptr; 6057 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6058 Device = C->getDevice(); 6059 6060 OMPLexicalScope Scope(*this, S, OMPD_task); 6061 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6062 } 6063 6064 void CodeGenFunction::EmitOMPTargetExitDataDirective( 6065 const OMPTargetExitDataDirective &S) { 6066 // If we don't have target devices, don't bother emitting the data mapping 6067 // code. 6068 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6069 return; 6070 6071 // Check if we have any if clause associated with the directive. 6072 const Expr *IfCond = nullptr; 6073 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6074 IfCond = C->getCondition(); 6075 6076 // Check if we have any device clause associated with the directive. 6077 const Expr *Device = nullptr; 6078 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6079 Device = C->getDevice(); 6080 6081 OMPLexicalScope Scope(*this, S, OMPD_task); 6082 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6083 } 6084 6085 static void emitTargetParallelRegion(CodeGenFunction &CGF, 6086 const OMPTargetParallelDirective &S, 6087 PrePostActionTy &Action) { 6088 // Get the captured statement associated with the 'parallel' region. 6089 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6090 Action.Enter(CGF); 6091 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6092 Action.Enter(CGF); 6093 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6094 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6095 CGF.EmitOMPPrivateClause(S, PrivateScope); 6096 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6097 (void)PrivateScope.Privatize(); 6098 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6099 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6100 // TODO: Add support for clauses. 6101 CGF.EmitStmt(CS->getCapturedStmt()); 6102 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6103 }; 6104 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6105 emitEmptyBoundParameters); 6106 emitPostUpdateForReductionClause(CGF, S, 6107 [](CodeGenFunction &) { return nullptr; }); 6108 } 6109 6110 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6111 CodeGenModule &CGM, StringRef ParentName, 6112 const OMPTargetParallelDirective &S) { 6113 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6114 emitTargetParallelRegion(CGF, S, Action); 6115 }; 6116 llvm::Function *Fn; 6117 llvm::Constant *Addr; 6118 // Emit target region as a standalone region. 
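// A hypothetical construct for which this device entry is generated:
//   #pragma omp target parallel
//   compute_chunk();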
6119 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6120 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6121 assert(Fn && Addr && "Target device function emission failed."); 6122 } 6123 6124 void CodeGenFunction::EmitOMPTargetParallelDirective( 6125 const OMPTargetParallelDirective &S) { 6126 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6127 emitTargetParallelRegion(CGF, S, Action); 6128 }; 6129 emitCommonOMPTargetDirective(*this, S, CodeGen); 6130 } 6131 6132 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 6133 const OMPTargetParallelForDirective &S, 6134 PrePostActionTy &Action) { 6135 Action.Enter(CGF); 6136 // Emit directive as a combined directive that consists of two implicit 6137 // directives: 'parallel' with 'for' directive. 6138 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6139 Action.Enter(CGF); 6140 CodeGenFunction::OMPCancelStackRAII CancelRegion( 6141 CGF, OMPD_target_parallel_for, S.hasCancel()); 6142 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6143 emitDispatchForLoopBounds); 6144 }; 6145 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 6146 emitEmptyBoundParameters); 6147 } 6148 6149 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 6150 CodeGenModule &CGM, StringRef ParentName, 6151 const OMPTargetParallelForDirective &S) { 6152 // Emit SPMD target parallel for region as a standalone region. 6153 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6154 emitTargetParallelForRegion(CGF, S, Action); 6155 }; 6156 llvm::Function *Fn; 6157 llvm::Constant *Addr; 6158 // Emit target region as a standalone region. 6159 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6160 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6161 assert(Fn && Addr && "Target device function emission failed."); 6162 } 6163 6164 void CodeGenFunction::EmitOMPTargetParallelForDirective( 6165 const OMPTargetParallelForDirective &S) { 6166 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6167 emitTargetParallelForRegion(CGF, S, Action); 6168 }; 6169 emitCommonOMPTargetDirective(*this, S, CodeGen); 6170 } 6171 6172 static void 6173 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 6174 const OMPTargetParallelForSimdDirective &S, 6175 PrePostActionTy &Action) { 6176 Action.Enter(CGF); 6177 // Emit directive as a combined directive that consists of two implicit 6178 // directives: 'parallel' with 'for' directive. 6179 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6180 Action.Enter(CGF); 6181 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6182 emitDispatchForLoopBounds); 6183 }; 6184 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen, 6185 emitEmptyBoundParameters); 6186 } 6187 6188 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 6189 CodeGenModule &CGM, StringRef ParentName, 6190 const OMPTargetParallelForSimdDirective &S) { 6191 // Emit SPMD target parallel for region as a standalone region. 6192 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6193 emitTargetParallelForSimdRegion(CGF, S, Action); 6194 }; 6195 llvm::Function *Fn; 6196 llvm::Constant *Addr; 6197 // Emit target region as a standalone region. 
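// E.g. (hypothetical), the outlined device function emitted below corresponds
// to a construct such as:
//   #pragma omp target parallel for simd
//   for (int i = 0; i < n; ++i)
//     a[i] = f(i);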
6198 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6199 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6200 assert(Fn && Addr && "Target device function emission failed.");
6201 }
6202
6203 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
6204 const OMPTargetParallelForSimdDirective &S) {
6205 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6206 emitTargetParallelForSimdRegion(CGF, S, Action);
6207 };
6208 emitCommonOMPTargetDirective(*this, S, CodeGen);
6209 }
6210
6211 /// Map a loop helper variable to the corresponding outlined-function parameter.
6212 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
6213 const ImplicitParamDecl *PVD,
6214 CodeGenFunction::OMPPrivateScope &Privates) {
6215 const auto *VDecl = cast<VarDecl>(Helper->getDecl());
6216 Privates.addPrivate(VDecl,
6217 [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
6218 }
6219
6220 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
6221 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
6222 // Emit outlined function for task construct.
6223 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
6224 Address CapturedStruct = Address::invalid();
6225 {
6226 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
6227 CapturedStruct = GenerateCapturedStmtArgument(*CS);
6228 }
6229 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
6230 const Expr *IfCond = nullptr;
6231 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
6232 if (C->getNameModifier() == OMPD_unknown ||
6233 C->getNameModifier() == OMPD_taskloop) {
6234 IfCond = C->getCondition();
6235 break;
6236 }
6237 }
6238
6239 OMPTaskDataTy Data;
6240 // Check if taskloop must be emitted without taskgroup.
6241 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
6242 // TODO: Check if we should emit tied or untied task.
6243 Data.Tied = true;
6244 // Set scheduling for taskloop.
6245 if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
6246 // grainsize clause
6247 Data.Schedule.setInt(/*IntVal=*/false);
6248 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
6249 } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
6250 // num_tasks clause
6251 Data.Schedule.setInt(/*IntVal=*/true);
6252 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
6253 }
6254
6255 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
6256 // if (PreCond) {
6257 // for (IV in 0..LastIteration) BODY;
6258 // <Final counter/linear vars updates>;
6259 // }
6260 //
6261
6262 // Emit: if (PreCond) - begin.
6263 // If the condition constant folds and can be elided, avoid emitting the
6264 // whole loop.
6265 bool CondConstant;
6266 llvm::BasicBlock *ContBlock = nullptr;
6267 OMPLoopScope PreInitScope(CGF, S);
6268 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
6269 if (!CondConstant)
6270 return;
6271 } else {
6272 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
6273 ContBlock = CGF.createBasicBlock("taskloop.if.end");
6274 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
6275 CGF.getProfileCount(&S));
6276 CGF.EmitBlock(ThenBlock);
6277 CGF.incrementProfileCounter(&S);
6278 }
6279
6280 (void)CGF.EmitOMPLinearClauseInit(S);
6281
6282 OMPPrivateScope LoopScope(CGF);
6283 // Emit helper vars inits.
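// The taskloop captured function receives the lower bound, upper bound,
// stride and last-iteration flag as trailing implicit parameters; the enum
// below indexes them within the captured declaration's parameter list
// (starting at index 5, per the layout produced for taskloop captures).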
6284 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6285 auto *I = CS->getCapturedDecl()->param_begin(); 6286 auto *LBP = std::next(I, LowerBound); 6287 auto *UBP = std::next(I, UpperBound); 6288 auto *STP = std::next(I, Stride); 6289 auto *LIP = std::next(I, LastIter); 6290 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6291 LoopScope); 6292 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6293 LoopScope); 6294 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6295 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6296 LoopScope); 6297 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6298 CGF.EmitOMPLinearClause(S, LoopScope); 6299 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6300 (void)LoopScope.Privatize(); 6301 // Emit the loop iteration variable. 6302 const Expr *IVExpr = S.getIterationVariable(); 6303 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6304 CGF.EmitVarDecl(*IVDecl); 6305 CGF.EmitIgnoredExpr(S.getInit()); 6306 6307 // Emit the iterations count variable. 6308 // If it is not a variable, Sema decided to calculate iterations count on 6309 // each iteration (e.g., it is foldable into a constant). 6310 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6311 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6312 // Emit calculation of the iterations count. 6313 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6314 } 6315 6316 { 6317 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6318 emitCommonSimdLoop( 6319 CGF, S, 6320 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6321 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6322 CGF.EmitOMPSimdInit(S); 6323 }, 6324 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6325 CGF.EmitOMPInnerLoop( 6326 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6327 [&S](CodeGenFunction &CGF) { 6328 emitOMPLoopBodyWithStopPoint(CGF, S, 6329 CodeGenFunction::JumpDest()); 6330 }, 6331 [](CodeGenFunction &) {}); 6332 }); 6333 } 6334 // Emit: if (PreCond) - end. 6335 if (ContBlock) { 6336 CGF.EmitBranch(ContBlock); 6337 CGF.EmitBlock(ContBlock, true); 6338 } 6339 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
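// E.g. (hypothetical):
//   #pragma omp taskloop lastprivate(x)
//   for (long i = 0; i < n; ++i)
//     x = a[i];
// Only the logically last iteration's value of 'x' is copied back to the
// original variable.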
6340 if (HasLastprivateClause) { 6341 CGF.EmitOMPLastprivateClauseFinal( 6342 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6343 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6344 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6345 (*LIP)->getType(), S.getBeginLoc()))); 6346 } 6347 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6348 return CGF.Builder.CreateIsNotNull( 6349 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6350 (*LIP)->getType(), S.getBeginLoc())); 6351 }); 6352 }; 6353 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6354 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6355 const OMPTaskDataTy &Data) { 6356 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6357 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6358 OMPLoopScope PreInitScope(CGF, S); 6359 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6360 OutlinedFn, SharedsTy, 6361 CapturedStruct, IfCond, Data); 6362 }; 6363 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6364 CodeGen); 6365 }; 6366 if (Data.Nogroup) { 6367 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6368 } else { 6369 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6370 *this, 6371 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6372 PrePostActionTy &Action) { 6373 Action.Enter(CGF); 6374 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6375 Data); 6376 }, 6377 S.getBeginLoc()); 6378 } 6379 } 6380 6381 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6382 auto LPCRegion = 6383 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6384 EmitOMPTaskLoopBasedDirective(S); 6385 } 6386 6387 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6388 const OMPTaskLoopSimdDirective &S) { 6389 auto LPCRegion = 6390 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6391 OMPLexicalScope Scope(*this, S); 6392 EmitOMPTaskLoopBasedDirective(S); 6393 } 6394 6395 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6396 const OMPMasterTaskLoopDirective &S) { 6397 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6398 Action.Enter(CGF); 6399 EmitOMPTaskLoopBasedDirective(S); 6400 }; 6401 auto LPCRegion = 6402 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6403 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 6404 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6405 } 6406 6407 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 6408 const OMPMasterTaskLoopSimdDirective &S) { 6409 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6410 Action.Enter(CGF); 6411 EmitOMPTaskLoopBasedDirective(S); 6412 }; 6413 auto LPCRegion = 6414 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6415 OMPLexicalScope Scope(*this, S); 6416 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6417 } 6418 6419 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 6420 const OMPParallelMasterTaskLoopDirective &S) { 6421 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6422 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6423 PrePostActionTy &Action) { 6424 Action.Enter(CGF); 6425 CGF.EmitOMPTaskLoopBasedDirective(S); 6426 }; 6427 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6428 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6429 S.getBeginLoc()); 
6430 }; 6431 auto LPCRegion = 6432 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6433 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 6434 emitEmptyBoundParameters); 6435 } 6436 6437 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 6438 const OMPParallelMasterTaskLoopSimdDirective &S) { 6439 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6440 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6441 PrePostActionTy &Action) { 6442 Action.Enter(CGF); 6443 CGF.EmitOMPTaskLoopBasedDirective(S); 6444 }; 6445 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6446 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6447 S.getBeginLoc()); 6448 }; 6449 auto LPCRegion = 6450 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6451 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 6452 emitEmptyBoundParameters); 6453 } 6454 6455 // Generate the instructions for '#pragma omp target update' directive. 6456 void CodeGenFunction::EmitOMPTargetUpdateDirective( 6457 const OMPTargetUpdateDirective &S) { 6458 // If we don't have target devices, don't bother emitting the data mapping 6459 // code. 6460 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6461 return; 6462 6463 // Check if we have any if clause associated with the directive. 6464 const Expr *IfCond = nullptr; 6465 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6466 IfCond = C->getCondition(); 6467 6468 // Check if we have any device clause associated with the directive. 6469 const Expr *Device = nullptr; 6470 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6471 Device = C->getDevice(); 6472 6473 OMPLexicalScope Scope(*this, S, OMPD_task); 6474 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6475 } 6476 6477 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 6478 const OMPExecutableDirective &D) { 6479 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 6480 EmitOMPScanDirective(*SD); 6481 return; 6482 } 6483 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 6484 return; 6485 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 6486 OMPPrivateScope GlobalsScope(CGF); 6487 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 6488 // Capture global firstprivates to avoid crash. 
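// E.g. (hypothetical): 'g' below has no local storage, so its address is
// captured up front before the task body is emitted:
//   int g;                            // global
//   void use() {
//   #pragma omp task firstprivate(g)
//     work(g);
//   }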
6489 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
6490 for (const Expr *Ref : C->varlists()) {
6491 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
6492 if (!DRE)
6493 continue;
6494 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
6495 if (!VD || VD->hasLocalStorage())
6496 continue;
6497 if (!CGF.LocalDeclMap.count(VD)) {
6498 LValue GlobLVal = CGF.EmitLValue(Ref);
6499 GlobalsScope.addPrivate(
6500 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
6501 }
6502 }
6503 }
6504 }
6505 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
6506 (void)GlobalsScope.Privatize();
6507 ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
6508 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
6509 } else {
6510 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
6511 for (const Expr *E : LD->counters()) {
6512 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
6513 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
6514 LValue GlobLVal = CGF.EmitLValue(E);
6515 GlobalsScope.addPrivate(
6516 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
6517 }
6518 if (isa<OMPCapturedExprDecl>(VD)) {
6519 // Emit only those that were not explicitly referenced in clauses.
6520 if (!CGF.LocalDeclMap.count(VD))
6521 CGF.EmitVarDecl(*VD);
6522 }
6523 }
6524 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
6525 if (!C->getNumForLoops())
6526 continue;
6527 for (unsigned I = LD->getCollapsedNumber(),
6528 E = C->getLoopNumIterations().size();
6529 I < E; ++I) {
6530 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
6531 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
6532 // Emit only those that were not explicitly referenced in clauses.
6533 if (!CGF.LocalDeclMap.count(VD))
6534 CGF.EmitVarDecl(*VD);
6535 }
6536 }
6537 }
6538 }
6539 (void)GlobalsScope.Privatize();
6540 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
6541 }
6542 };
6543 {
6544 auto LPCRegion =
6545 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
6546 OMPSimdLexicalScope Scope(*this, D);
6547 CGM.getOpenMPRuntime().emitInlinedDirective(
6548 *this,
6549 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
6550 : D.getDirectiveKind(),
6551 CodeGen);
6552 }
6553 // Check for outer lastprivate conditional update.
6554 checkForLastprivateConditionalUpdate(*this, D);
6555 }
6556