//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of expressions used in the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(
                                OrigVD->getType().getNonReferenceType()))),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
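      // For 'for (T x : cont)', the implicit '__range' and '__end' variables
      // (and any init-statement) must be in scope before the loop bounds can
      // be computed, which is what the walk below performs for each nested
      // loop of the associated loop nest.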
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted as they are
    // not used in simd-only mode.
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
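/// Two flavors of outlined function are produced from these options (see
/// GenerateOpenMPCapturedStmtFunction below): the runtime-facing one, with
/// UIntPtrCastRequired set, whose by-value captures travel as uintptr_t, and,
/// when debug info is required, a debug variant that keeps the original
/// parameter types (UIntPtrCastRequired unset).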
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
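    // For example, a 'double' captured by value occupies a uintptr_t-sized
    // argument slot and is reinterpreted on function entry, via
    // castValueFromUintptr() defined above.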
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
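  // When debug info is required, a debug-friendly version of the outlined
  // function is emitted first under the name '<helper>_debug__', keeping the
  // original parameter types; the runtime-facing variant with uintptr_t
  // arguments is then emitted under the helper name and simply forwards the
  // call to the debug version.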
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace())));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
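  // (emitArrayLength() below sets ElementTy to that base element type and
  // returns the total element count across all dimensions, so nested arrays
  // are copied as one flat loop.)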
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI =
      Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element-by-element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function, e.g. omp for, omp simd, omp distribute.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit a copy for firstprivate constant variables in target
      // regions that are captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First, check whether the current thread is the master thread. If
          // it is, there is no need to copy data.
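          // The master thread sees the same address for the master and
          // threadprivate copies, so the pointer inequality emitted below is
          // true only on non-master threads, which are the ones that must
          // perform the copy.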
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization, it is done in
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                               OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
                                                                        OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit an implicit barrier if at least one lastprivate conditional is
    // found and this is not simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable of a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
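      // (Throughout this function the LHS implicit variable stands for the
      // original shared storage and the RHS implicit variable for the
      // thread-local private copy; the reduction combiner expressions are
      // written in terms of these two.)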
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
This is necessary for combined constructs such as 1481 /// 'distribute parallel for'. 1482 typedef llvm::function_ref<void(CodeGenFunction &, 1483 const OMPExecutableDirective &, 1484 llvm::SmallVectorImpl<llvm::Value *> &)> 1485 CodeGenBoundParametersTy; 1486 } // anonymous namespace 1487 1488 static void 1489 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF, 1490 const OMPExecutableDirective &S) { 1491 if (CGF.getLangOpts().OpenMP < 50) 1492 return; 1493 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls; 1494 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 1495 for (const Expr *Ref : C->varlists()) { 1496 if (!Ref->getType()->isScalarType()) 1497 continue; 1498 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1499 if (!DRE) 1500 continue; 1501 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1502 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1503 } 1504 } 1505 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 1506 for (const Expr *Ref : C->varlists()) { 1507 if (!Ref->getType()->isScalarType()) 1508 continue; 1509 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1510 if (!DRE) 1511 continue; 1512 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1513 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1514 } 1515 } 1516 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) { 1517 for (const Expr *Ref : C->varlists()) { 1518 if (!Ref->getType()->isScalarType()) 1519 continue; 1520 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1521 if (!DRE) 1522 continue; 1523 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1524 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1525 } 1526 } 1527 // Privates should not be analyzed since they are not captured at all. 1528 // Task reductions may be skipped - tasks are ignored. 1529 // Firstprivates do not return a value but may be passed by reference - no need 1530 // to check for updated lastprivate conditional.
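// (Illustration: this analysis supports OpenMP 5.0 constructs such as
// '#pragma omp parallel for lastprivate(conditional: x)', where only the
// iteration that would be last to assign 'x' updates the original variable.)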
1531 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1532 for (const Expr *Ref : C->varlists()) { 1533 if (!Ref->getType()->isScalarType()) 1534 continue; 1535 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1536 if (!DRE) 1537 continue; 1538 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1539 } 1540 } 1541 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1542 CGF, S, PrivateDecls); 1543 } 1544 1545 static void emitCommonOMPParallelDirective( 1546 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1547 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1548 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1549 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1550 llvm::Function *OutlinedFn = 1551 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1552 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1553 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1554 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1555 llvm::Value *NumThreads = 1556 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1557 /*IgnoreResultAssign=*/true); 1558 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1559 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1560 } 1561 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1562 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1563 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1564 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1565 } 1566 const Expr *IfCond = nullptr; 1567 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1568 if (C->getNameModifier() == OMPD_unknown || 1569 C->getNameModifier() == OMPD_parallel) { 1570 IfCond = C->getCondition(); 1571 break; 1572 } 1573 } 1574 1575 OMPParallelScope Scope(CGF, S); 1576 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1577 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk 1578 // lower and upper bounds with the pragma 'for' chunking mechanism. 1579 // The following lambda takes care of appending the lower and upper bound 1580 // parameters when necessary. 1581 CodeGenBoundParameters(CGF, S, CapturedVars); 1582 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1583 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1584 CapturedVars, IfCond); 1585 } 1586 1587 static bool isAllocatableDecl(const VarDecl *VD) { 1588 const VarDecl *CVD = VD->getCanonicalDecl(); 1589 if (!CVD->hasAttr<OMPAllocateDeclAttr>()) 1590 return false; 1591 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>(); 1592 // Use the default allocation.
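// (e.g. a variable under '#pragma omp allocate(a) allocator(omp_high_bw_mem_alloc)'
// is treated as allocatable below; with no allocator expression and only the
// default or null allocator, it is not.)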
1593 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc || 1594 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) && 1595 !AA->getAllocator()); 1596 } 1597 1598 static void emitEmptyBoundParameters(CodeGenFunction &, 1599 const OMPExecutableDirective &, 1600 llvm::SmallVectorImpl<llvm::Value *> &) {} 1601 1602 Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable( 1603 CodeGenFunction &CGF, const VarDecl *VD) { 1604 CodeGenModule &CGM = CGF.CGM; 1605 auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 1606 1607 if (!VD) 1608 return Address::invalid(); 1609 const VarDecl *CVD = VD->getCanonicalDecl(); 1610 if (!isAllocatableDecl(CVD)) 1611 return Address::invalid(); 1612 llvm::Value *Size; 1613 CharUnits Align = CGM.getContext().getDeclAlign(CVD); 1614 if (CVD->getType()->isVariablyModifiedType()) { 1615 Size = CGF.getTypeSize(CVD->getType()); 1616 // Align the size: ((size + align - 1) / align) * align 1617 Size = CGF.Builder.CreateNUWAdd( 1618 Size, CGM.getSize(Align - CharUnits::fromQuantity(1))); 1619 Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align)); 1620 Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align)); 1621 } else { 1622 CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType()); 1623 Size = CGM.getSize(Sz.alignTo(Align)); 1624 } 1625 1626 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>(); 1627 assert(AA->getAllocator() && 1628 "Expected allocator expression for non-default allocator."); 1629 llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator()); 1630 // According to the standard, the original allocator type is an enum (integer). 1631 // Convert to pointer type, if required. 1632 if (Allocator->getType()->isIntegerTy()) 1633 Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy); 1634 else if (Allocator->getType()->isPointerTy()) 1635 Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator, 1636 CGM.VoidPtrTy); 1637 1638 llvm::Value *Addr = OMPBuilder.createOMPAlloc( 1639 CGF.Builder, Size, Allocator, 1640 getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", ".")); 1641 llvm::CallInst *FreeCI = 1642 OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator); 1643 1644 CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI); 1645 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 1646 Addr, 1647 CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())), 1648 getNameWithSeparators({CVD->getName(), ".addr"}, ".", ".")); 1649 return Address(Addr, Align); 1650 } 1651 1652 Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( 1653 CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, 1654 SourceLocation Loc) { 1655 CodeGenModule &CGM = CGF.CGM; 1656 if (CGM.getLangOpts().OpenMPUseTLS && 1657 CGM.getContext().getTargetInfo().isTLSSupported()) 1658 return VDAddr; 1659 1660 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 1661 1662 llvm::Type *VarTy = VDAddr.getElementType(); 1663 llvm::Value *Data = 1664 CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy); 1665 llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)); 1666 std::string Suffix = getNameWithSeparators({"cache", ""}); 1667 llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix); 1668 1669 llvm::CallInst *ThreadPrivateCacheCall = 1670 OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName); 1671 1672 return Address(ThreadPrivateCacheCall, VDAddr.getAlignment()); 1673
} 1674 1675 std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators( 1676 ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) { 1677 SmallString<128> Buffer; 1678 llvm::raw_svector_ostream OS(Buffer); 1679 StringRef Sep = FirstSeparator; 1680 for (StringRef Part : Parts) { 1681 OS << Sep << Part; 1682 Sep = Separator; 1683 } 1684 return OS.str().str(); 1685 } 1686 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 1687 if (CGM.getLangOpts().OpenMPIRBuilder) { 1688 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 1689 // Check if we have any if clause associated with the directive. 1690 llvm::Value *IfCond = nullptr; 1691 if (const auto *C = S.getSingleClause<OMPIfClause>()) 1692 IfCond = EmitScalarExpr(C->getCondition(), 1693 /*IgnoreResultAssign=*/true); 1694 1695 llvm::Value *NumThreads = nullptr; 1696 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) 1697 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(), 1698 /*IgnoreResultAssign=*/true); 1699 1700 ProcBindKind ProcBind = OMP_PROC_BIND_default; 1701 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) 1702 ProcBind = ProcBindClause->getProcBindKind(); 1703 1704 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 1705 1706 // The cleanup callback that finalizes all variables at the given location, 1707 // i.e., calls destructors etc. 1708 auto FiniCB = [this](InsertPointTy IP) { 1709 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 1710 }; 1711 1712 // Privatization callback that performs appropriate action for 1713 // shared/private/firstprivate/lastprivate/copyin/... variables. 1714 // 1715 // TODO: This defaults to shared right now. 1716 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1717 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) { 1718 // The next line is appropriate only for variables (Val) with the 1719 // data-sharing attribute "shared". 1720 ReplVal = &Val; 1721 1722 return CodeGenIP; 1723 }; 1724 1725 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1726 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt(); 1727 1728 auto BodyGenCB = [ParallelRegionBodyStmt, 1729 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1730 llvm::BasicBlock &ContinuationBB) { 1731 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP, 1732 ContinuationBB); 1733 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt, 1734 CodeGenIP, ContinuationBB); 1735 }; 1736 1737 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 1738 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 1739 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 1740 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 1741 Builder.restoreIP( 1742 OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB, 1743 IfCond, NumThreads, ProcBind, S.hasCancel())); 1744 return; 1745 } 1746 1747 // Emit parallel region as a standalone region. 1748 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 1749 Action.Enter(CGF); 1750 OMPPrivateScope PrivateScope(CGF); 1751 bool Copyins = CGF.EmitOMPCopyinClause(S); 1752 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 1753 if (Copyins) { 1754 // Emit an implicit barrier to synchronize threads and avoid data races on 1755 // propagation of the master thread's values of threadprivate variables to 1756 // the local instances of those variables in all other implicit threads.
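// (e.g. in '#pragma omp parallel copyin(t)' with threadprivate 't', every
// thread must observe the master thread's value of 't' before it starts
// executing the region body.)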
1757 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 1758 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 1759 /*ForceSimpleCall=*/true); 1760 } 1761 CGF.EmitOMPPrivateClause(S, PrivateScope); 1762 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 1763 (void)PrivateScope.Privatize(); 1764 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt()); 1765 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 1766 }; 1767 { 1768 auto LPCRegion = 1769 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 1770 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen, 1771 emitEmptyBoundParameters); 1772 emitPostUpdateForReductionClause(*this, S, 1773 [](CodeGenFunction &) { return nullptr; }); 1774 } 1775 // Check for outer lastprivate conditional update. 1776 checkForLastprivateConditionalUpdate(*this, S); 1777 } 1778 1779 namespace { 1780 /// RAII to handle scopes for loop transformation directives. 1781 class OMPTransformDirectiveScopeRAII { 1782 OMPLoopScope *Scope = nullptr; 1783 CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr; 1784 CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr; 1785 1786 public: 1787 OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) { 1788 if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) { 1789 Scope = new OMPLoopScope(CGF, *Dir); 1790 CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP); 1791 CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI); 1792 } 1793 } 1794 ~OMPTransformDirectiveScopeRAII() { 1795 if (!Scope) 1796 return; 1797 delete CapInfoRAII; 1798 delete CGSI; 1799 delete Scope; 1800 } 1801 }; 1802 } // namespace 1803 1804 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, 1805 int MaxLevel, int Level = 0) { 1806 assert(Level < MaxLevel && "Too deep lookup during loop body codegen."); 1807 const Stmt *SimplifiedS = S->IgnoreContainers(); 1808 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) { 1809 PrettyStackTraceLoc CrashInfo( 1810 CGF.getContext().getSourceManager(), CS->getLBracLoc(), 1811 "LLVM IR generation of compound statement ('{}')"); 1812 1813 // Keep track of the current cleanup stack depth, including debug scopes. 
1814 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1815 for (const Stmt *CurStmt : CS->body()) 1816 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1817 return; 1818 } 1819 if (SimplifiedS == NextLoop) { 1820 OMPTransformDirectiveScopeRAII PossiblyTransformDirectiveScope(CGF, 1821 SimplifiedS); 1822 if (auto *Dir = dyn_cast<OMPTileDirective>(SimplifiedS)) 1823 SimplifiedS = Dir->getTransformedStmt(); 1824 if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS)) 1825 SimplifiedS = CanonLoop->getLoopStmt(); 1826 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1827 S = For->getBody(); 1828 } else { 1829 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1830 "Expected canonical for loop or range-based for loop."); 1831 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1832 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1833 S = CXXFor->getBody(); 1834 } 1835 if (Level + 1 < MaxLevel) { 1836 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1837 S, /*TryImperfectlyNestedLoops=*/true); 1838 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1839 return; 1840 } 1841 } 1842 CGF.EmitStmt(S); 1843 } 1844 1845 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1846 JumpDest LoopExit) { 1847 RunCleanupsScope BodyScope(*this); 1848 // Update counter values on the current iteration. 1849 for (const Expr *UE : D.updates()) 1850 EmitIgnoredExpr(UE); 1851 // Update the linear variables. 1852 // In distribute directives only loop counters may be marked as linear, no 1853 // need to generate the code for them. 1854 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1855 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1856 for (const Expr *UE : C->updates()) 1857 EmitIgnoredExpr(UE); 1858 } 1859 } 1860 1861 // On a continue in the body, jump to the end. 1862 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1863 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1864 for (const Expr *E : D.finals_conditions()) { 1865 if (!E) 1866 continue; 1867 // Check that the loop counter in a non-rectangular nest fits into the 1868 // iteration space. 1869 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1870 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1871 getProfileCount(D.getBody())); 1872 EmitBlock(NextBB); 1873 } 1874 1875 OMPPrivateScope InscanScope(*this); 1876 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true); 1877 bool IsInscanRegion = InscanScope.Privatize(); 1878 if (IsInscanRegion) { 1879 // Need to remember the blocks before and after the scan directive 1880 // to dispatch them correctly depending on the clause used in 1881 // this directive, inclusive or exclusive. For an inclusive scan the natural 1882 // order of the blocks is used; for an exclusive clause the blocks must be 1883 // executed in reverse order. 1884 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb"); 1885 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb"); 1886 // No need to allocate the inscan exit block in simd mode; it is selected in 1887 // the codegen for the scan directive. 1888 if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd) 1889 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1890 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1891 EmitBranch(OMPScanDispatch); 1892 EmitBlock(OMPBeforeScanBlock); 1893 } 1894 1895 // Emit loop variables for C++ range loops.
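// (Illustration, not codegen: for '#pragma omp for collapse(2)' over a
// two-level nest, emitBody below descends both loop levels, so only the
// innermost statement is emitted as the loop body.)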
1896 const Stmt *Body = 1897 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1898 // Emit loop body. 1899 emitBody(*this, Body, 1900 OMPLoopBasedDirective::tryToFindNextInnerLoop( 1901 Body, /*TryImperfectlyNestedLoops=*/true), 1902 D.getLoopsNumber()); 1903 1904 // Jump to the dispatcher at the end of the loop body. 1905 if (IsInscanRegion) 1906 EmitBranch(OMPScanExitBlock); 1907 1908 // The end (updates/cleanups). 1909 EmitBlock(Continue.getBlock()); 1910 BreakContinueStack.pop_back(); 1911 } 1912 1913 using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>; 1914 1915 /// Emit a captured statement and return the function as well as its captured 1916 /// closure context. 1917 static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF, 1918 const CapturedStmt *S) { 1919 LValue CapStruct = ParentCGF.InitCapturedStruct(*S); 1920 CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true); 1921 std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI = 1922 std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S); 1923 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get()); 1924 llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S); 1925 1926 return {F, CapStruct.getPointer(ParentCGF)}; 1927 } 1928 1929 /// Emit a call to a previously captured closure. 1930 static llvm::CallInst * 1931 emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap, 1932 llvm::ArrayRef<llvm::Value *> Args) { 1933 // Append the closure context to the argument. 1934 SmallVector<llvm::Value *> EffectiveArgs; 1935 EffectiveArgs.reserve(Args.size() + 1); 1936 llvm::append_range(EffectiveArgs, Args); 1937 EffectiveArgs.push_back(Cap.second); 1938 1939 return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs); 1940 } 1941 1942 llvm::CanonicalLoopInfo * 1943 CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) { 1944 assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented"); 1945 1946 EmitStmt(S); 1947 assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops"); 1948 1949 // The last added loop is the outermost one. 1950 return OMPLoopNestStack.back(); 1951 } 1952 1953 void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) { 1954 const Stmt *SyntacticalLoop = S->getLoopStmt(); 1955 if (!getLangOpts().OpenMPIRBuilder) { 1956 // Ignore if OpenMPIRBuilder is not enabled. 1957 EmitStmt(SyntacticalLoop); 1958 return; 1959 } 1960 1961 LexicalScope ForScope(*this, S->getSourceRange()); 1962 1963 // Emit init statements. The Distance/LoopVar funcs may reference variable 1964 // declarations they contain. 1965 const Stmt *BodyStmt; 1966 if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) { 1967 if (const Stmt *InitStmt = For->getInit()) 1968 EmitStmt(InitStmt); 1969 BodyStmt = For->getBody(); 1970 } else if (const auto *RangeFor = 1971 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) { 1972 if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt()) 1973 EmitStmt(RangeStmt); 1974 if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt()) 1975 EmitStmt(BeginStmt); 1976 if (const DeclStmt *EndStmt = RangeFor->getEndStmt()) 1977 EmitStmt(EndStmt); 1978 if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt()) 1979 EmitStmt(LoopVarStmt); 1980 BodyStmt = RangeFor->getBody(); 1981 } else 1982 llvm_unreachable("Expected for-stmt or range-based for-stmt"); 1983 1984 // Emit closure for later use. By-value captures will be captured here. 
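// (Conceptually, for a loop 'for (int i = a; i < b; i += c)' the distance
// closure computes the trip count and the loop-variable closure reconstructs
// 'i = a + logical * c' from a logical iteration number.)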
1985 const CapturedStmt *DistanceFunc = S->getDistanceFunc(); 1986 EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc); 1987 const CapturedStmt *LoopVarFunc = S->getLoopVarFunc(); 1988 EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc); 1989 1990 // Call the distance function to get the number of iterations of the loop to 1991 // come. 1992 QualType LogicalTy = DistanceFunc->getCapturedDecl() 1993 ->getParam(0) 1994 ->getType() 1995 .getNonReferenceType(); 1996 Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr"); 1997 emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()}); 1998 llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count"); 1999 2000 // Emit the loop structure. 2001 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 2002 auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP, 2003 llvm::Value *IndVar) { 2004 Builder.restoreIP(CodeGenIP); 2005 2006 // Emit the loop body: Convert the logical iteration number to the loop 2007 // variable and emit the body. 2008 const DeclRefExpr *LoopVarRef = S->getLoopVarRef(); 2009 LValue LCVal = EmitLValue(LoopVarRef); 2010 Address LoopVarAddress = LCVal.getAddress(*this); 2011 emitCapturedStmtCall(*this, LoopVarClosure, 2012 {LoopVarAddress.getPointer(), IndVar}); 2013 2014 RunCleanupsScope BodyScope(*this); 2015 EmitStmt(BodyStmt); 2016 }; 2017 llvm::CanonicalLoopInfo *CL = 2018 OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal); 2019 2020 // Finish up the loop. 2021 Builder.restoreIP(CL->getAfterIP()); 2022 ForScope.ForceCleanup(); 2023 2024 // Remember the CanonicalLoopInfo for parent AST nodes consuming it. 2025 OMPLoopNestStack.push_back(CL); 2026 } 2027 2028 void CodeGenFunction::EmitOMPInnerLoop( 2029 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 2030 const Expr *IncExpr, 2031 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 2032 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 2033 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 2034 2035 // Start the loop with a block that tests the condition. 2036 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 2037 EmitBlock(CondBlock); 2038 const SourceRange R = S.getSourceRange(); 2039 2040 // If attributes are attached, push the basic block together with them. 2041 const auto &OMPED = cast<OMPExecutableDirective>(S); 2042 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 2043 const Stmt *SS = ICS->getCapturedStmt(); 2044 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 2045 OMPLoopNestStack.clear(); 2046 if (AS) 2047 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 2048 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 2049 SourceLocToDebugLoc(R.getEnd())); 2050 else 2051 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2052 SourceLocToDebugLoc(R.getEnd())); 2053 2054 // If there are any cleanups between here and the loop-exit scope, 2055 // create a block to stage a loop exit along. 2056 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2057 if (RequiresCleanup) 2058 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 2059 2060 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 2061 2062 // Emit condition.
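// (The resulting CFG: omp.inner.for.cond branches to omp.inner.for.body or
// to the exit; the body falls through to omp.inner.for.inc, which branches
// back to the condition block, and control leaves at omp.inner.for.end.)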
2063 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 2064 if (ExitBlock != LoopExit.getBlock()) { 2065 EmitBlock(ExitBlock); 2066 EmitBranchThroughCleanup(LoopExit); 2067 } 2068 2069 EmitBlock(LoopBody); 2070 incrementProfileCounter(&S); 2071 2072 // Create a block for the increment. 2073 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 2074 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2075 2076 BodyGen(*this); 2077 2078 // Emit "IV = IV + 1" and a back-edge to the condition block. 2079 EmitBlock(Continue.getBlock()); 2080 EmitIgnoredExpr(IncExpr); 2081 PostIncGen(*this); 2082 BreakContinueStack.pop_back(); 2083 EmitBranch(CondBlock); 2084 LoopStack.pop(); 2085 // Emit the fall-through block. 2086 EmitBlock(LoopExit.getBlock()); 2087 } 2088 2089 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 2090 if (!HaveInsertPoint()) 2091 return false; 2092 // Emit inits for the linear variables. 2093 bool HasLinears = false; 2094 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2095 for (const Expr *Init : C->inits()) { 2096 HasLinears = true; 2097 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 2098 if (const auto *Ref = 2099 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 2100 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 2101 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 2102 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 2103 CapturedStmtInfo->lookup(OrigVD) != nullptr, 2104 VD->getInit()->getType(), VK_LValue, 2105 VD->getInit()->getExprLoc()); 2106 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 2107 VD->getType()), 2108 /*capturedByInit=*/false); 2109 EmitAutoVarCleanups(Emission); 2110 } else { 2111 EmitVarDecl(*VD); 2112 } 2113 } 2114 // Emit the linear steps for the linear clauses. 2115 // If a step is not constant, it is pre-calculated before the loop. 2116 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 2117 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 2118 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 2119 // Emit calculation of the linear step. 2120 EmitIgnoredExpr(CS); 2121 } 2122 } 2123 return HasLinears; 2124 } 2125 2126 void CodeGenFunction::EmitOMPLinearClauseFinal( 2127 const OMPLoopDirective &D, 2128 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2129 if (!HaveInsertPoint()) 2130 return; 2131 llvm::BasicBlock *DoneBB = nullptr; 2132 // Emit the final values of the linear variables. 2133 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2134 auto IC = C->varlist_begin(); 2135 for (const Expr *F : C->finals()) { 2136 if (!DoneBB) { 2137 if (llvm::Value *Cond = CondGen(*this)) { 2138 // If the first post-update expression is found, emit conditional 2139 // block if it was requested. 
2140 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 2141 DoneBB = createBasicBlock(".omp.linear.pu.done"); 2142 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2143 EmitBlock(ThenBB); 2144 } 2145 } 2146 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 2147 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 2148 CapturedStmtInfo->lookup(OrigVD) != nullptr, 2149 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 2150 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 2151 CodeGenFunction::OMPPrivateScope VarScope(*this); 2152 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2153 (void)VarScope.Privatize(); 2154 EmitIgnoredExpr(F); 2155 ++IC; 2156 } 2157 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 2158 EmitIgnoredExpr(PostUpdate); 2159 } 2160 if (DoneBB) 2161 EmitBlock(DoneBB, /*IsFinished=*/true); 2162 } 2163 2164 static void emitAlignedClause(CodeGenFunction &CGF, 2165 const OMPExecutableDirective &D) { 2166 if (!CGF.HaveInsertPoint()) 2167 return; 2168 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 2169 llvm::APInt ClauseAlignment(64, 0); 2170 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 2171 auto *AlignmentCI = 2172 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 2173 ClauseAlignment = AlignmentCI->getValue(); 2174 } 2175 for (const Expr *E : Clause->varlists()) { 2176 llvm::APInt Alignment(ClauseAlignment); 2177 if (Alignment == 0) { 2178 // OpenMP [2.8.1, Description] 2179 // If no optional parameter is specified, implementation-defined default 2180 // alignments for SIMD instructions on the target platforms are assumed. 2181 Alignment = 2182 CGF.getContext() 2183 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 2184 E->getType()->getPointeeType())) 2185 .getQuantity(); 2186 } 2187 assert((Alignment == 0 || Alignment.isPowerOf2()) && 2188 "alignment is not power of 2"); 2189 if (Alignment != 0) { 2190 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 2191 CGF.emitAlignmentAssumption( 2192 PtrValue, E, /*No second loc needed*/ SourceLocation(), 2193 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 2194 } 2195 } 2196 } 2197 } 2198 2199 void CodeGenFunction::EmitOMPPrivateLoopCounters( 2200 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 2201 if (!HaveInsertPoint()) 2202 return; 2203 auto I = S.private_counters().begin(); 2204 for (const Expr *E : S.counters()) { 2205 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2206 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 2207 // Emit var without initialization. 2208 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 2209 EmitAutoVarCleanups(VarEmission); 2210 LocalDeclMap.erase(PrivateVD); 2211 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 2212 return VarEmission.getAllocatedAddress(); 2213 }); 2214 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 2215 VD->hasGlobalStorage()) { 2216 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 2217 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 2218 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 2219 E->getType(), VK_LValue, E->getExprLoc()); 2220 return EmitLValue(&DRE).getAddress(*this); 2221 }); 2222 } else { 2223 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 2224 return VarEmission.getAllocatedAddress(); 2225 }); 2226 } 2227 ++I; 2228 } 2229 // Privatize extra loop counters used in loops for ordered(n) clauses. 
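// (e.g. with '#pragma omp for ordered(2)' and no collapse clause, the
// ordered clause covers two loop levels while only one counter is a
// collapsed loop counter; the second level's counter gets the private copy
// created below.)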
2230 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 2231 if (!C->getNumForLoops()) 2232 continue; 2233 for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size(); 2234 I < E; ++I) { 2235 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 2236 const auto *VD = cast<VarDecl>(DRE->getDecl()); 2237 // Override only those variables that can be captured to avoid re-emission 2238 // of the variables declared within the loops. 2239 if (DRE->refersToEnclosingVariableOrCapture()) { 2240 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 2241 return CreateMemTemp(DRE->getType(), VD->getName()); 2242 }); 2243 } 2244 } 2245 } 2246 } 2247 2248 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 2249 const Expr *Cond, llvm::BasicBlock *TrueBlock, 2250 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 2251 if (!CGF.HaveInsertPoint()) 2252 return; 2253 { 2254 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 2255 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 2256 (void)PreCondScope.Privatize(); 2257 // Get initial values of real counters. 2258 for (const Expr *I : S.inits()) { 2259 CGF.EmitIgnoredExpr(I); 2260 } 2261 } 2262 // Create temp loop control variables with their init values to support 2263 // non-rectangular loops. 2264 CodeGenFunction::OMPMapVars PreCondVars; 2265 for (const Expr *E : S.dependent_counters()) { 2266 if (!E) 2267 continue; 2268 assert(!E->getType().getNonReferenceType()->isRecordType() && 2269 "dependent counter must not be an iterator."); 2270 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2271 Address CounterAddr = 2272 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2273 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2274 } 2275 (void)PreCondVars.apply(CGF); 2276 for (const Expr *E : S.dependent_inits()) { 2277 if (!E) 2278 continue; 2279 CGF.EmitIgnoredExpr(E); 2280 } 2281 // Check that the loop is executed at least once. 2282 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2283 PreCondVars.restore(CGF); 2284 } 2285 2286 void CodeGenFunction::EmitOMPLinearClause( 2287 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2288 if (!HaveInsertPoint()) 2289 return; 2290 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2291 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2292 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2293 for (const Expr *C : LoopDirective->counters()) { 2294 SIMDLCVs.insert( 2295 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2296 } 2297 } 2298 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2299 auto CurPrivate = C->privates().begin(); 2300 for (const Expr *E : C->varlists()) { 2301 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2302 const auto *PrivateVD = 2303 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2304 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2305 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 2306 // Emit private VarDecl with copy init. 2307 EmitVarDecl(*PrivateVD); 2308 return GetAddrOfLocalVar(PrivateVD); 2309 }); 2310 assert(IsRegistered && "linear var already registered as private"); 2311 // Silence the warning about unused variable.
2312 (void)IsRegistered; 2313 } else { 2314 EmitVarDecl(*PrivateVD); 2315 } 2316 ++CurPrivate; 2317 } 2318 } 2319 } 2320 2321 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2322 const OMPExecutableDirective &D, 2323 bool IsMonotonic) { 2324 if (!CGF.HaveInsertPoint()) 2325 return; 2326 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2327 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2328 /*ignoreResult=*/true); 2329 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2330 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2331 // In the presence of a finite 'safelen', it may be unsafe to mark all 2332 // the memory instructions parallel, because loop-carried 2333 // dependences of 'safelen' iterations are possible. 2334 if (!IsMonotonic) 2335 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2336 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2337 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2338 /*ignoreResult=*/true); 2339 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2340 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2341 // In the presence of a finite 'safelen', it may be unsafe to mark all 2342 // the memory instructions parallel, because loop-carried 2343 // dependences of 'safelen' iterations are possible. 2344 CGF.LoopStack.setParallel(/*Enable=*/false); 2345 } 2346 } 2347 2348 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 2349 bool IsMonotonic) { 2350 // Walk clauses and process safelen/lastprivate. 2351 LoopStack.setParallel(!IsMonotonic); 2352 LoopStack.setVectorizeEnable(); 2353 emitSimdlenSafelenClause(*this, D, IsMonotonic); 2354 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2355 if (C->getKind() == OMPC_ORDER_concurrent) 2356 LoopStack.setParallel(/*Enable=*/true); 2357 if ((D.getDirectiveKind() == OMPD_simd || 2358 (getLangOpts().OpenMPSimd && 2359 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2360 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2361 [](const OMPReductionClause *C) { 2362 return C->getModifier() == OMPC_REDUCTION_inscan; 2363 })) 2364 // Disable parallel access in case of prefix sum. 2365 LoopStack.setParallel(/*Enable=*/false); 2366 } 2367 2368 void CodeGenFunction::EmitOMPSimdFinal( 2369 const OMPLoopDirective &D, 2370 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2371 if (!HaveInsertPoint()) 2372 return; 2373 llvm::BasicBlock *DoneBB = nullptr; 2374 auto IC = D.counters().begin(); 2375 auto IPC = D.private_counters().begin(); 2376 for (const Expr *F : D.finals()) { 2377 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 2378 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 2379 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 2380 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 2381 OrigVD->hasGlobalStorage() || CED) { 2382 if (!DoneBB) { 2383 if (llvm::Value *Cond = CondGen(*this)) { 2384 // If the first post-update expression is found, emit conditional 2385 // block if it was requested.
2386 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 2387 DoneBB = createBasicBlock(".omp.final.done"); 2388 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2389 EmitBlock(ThenBB); 2390 } 2391 } 2392 Address OrigAddr = Address::invalid(); 2393 if (CED) { 2394 OrigAddr = 2395 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 2396 } else { 2397 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 2398 /*RefersToEnclosingVariableOrCapture=*/false, 2399 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 2400 OrigAddr = EmitLValue(&DRE).getAddress(*this); 2401 } 2402 OMPPrivateScope VarScope(*this); 2403 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2404 (void)VarScope.Privatize(); 2405 EmitIgnoredExpr(F); 2406 } 2407 ++IC; 2408 ++IPC; 2409 } 2410 if (DoneBB) 2411 EmitBlock(DoneBB, /*IsFinished=*/true); 2412 } 2413 2414 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 2415 const OMPLoopDirective &S, 2416 CodeGenFunction::JumpDest LoopExit) { 2417 CGF.EmitOMPLoopBody(S, LoopExit); 2418 CGF.EmitStopPoint(&S); 2419 } 2420 2421 /// Emit a helper variable and return corresponding lvalue. 2422 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2423 const DeclRefExpr *Helper) { 2424 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2425 CGF.EmitVarDecl(*VDecl); 2426 return CGF.EmitLValue(Helper); 2427 } 2428 2429 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2430 const RegionCodeGenTy &SimdInitGen, 2431 const RegionCodeGenTy &BodyCodeGen) { 2432 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2433 PrePostActionTy &) { 2434 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2435 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2436 SimdInitGen(CGF); 2437 2438 BodyCodeGen(CGF); 2439 }; 2440 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2441 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2442 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2443 2444 BodyCodeGen(CGF); 2445 }; 2446 const Expr *IfCond = nullptr; 2447 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2448 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2449 if (CGF.getLangOpts().OpenMP >= 50 && 2450 (C->getNameModifier() == OMPD_unknown || 2451 C->getNameModifier() == OMPD_simd)) { 2452 IfCond = C->getCondition(); 2453 break; 2454 } 2455 } 2456 } 2457 if (IfCond) { 2458 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2459 } else { 2460 RegionCodeGenTy ThenRCG(ThenGen); 2461 ThenRCG(CGF); 2462 } 2463 } 2464 2465 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2466 PrePostActionTy &Action) { 2467 Action.Enter(CGF); 2468 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2469 "Expected simd directive"); 2470 OMPLoopScope PreInitScope(CGF, S); 2471 // if (PreCond) { 2472 // for (IV in 0..LastIteration) BODY; 2473 // <Final counter/linear vars updates>; 2474 // } 2475 // 2476 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2477 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2478 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2479 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2480 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2481 } 2482 2483 // Emit: if (PreCond) - begin. 2484 // If the condition constant folds and can be elided, avoid emitting the 2485 // whole loop. 
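// (e.g. a loop such as 'for (int i = 0; i < 0; ++i)' has a precondition that
// constant-folds to false, so no IR is emitted for the region at all.)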
2486 bool CondConstant; 2487 llvm::BasicBlock *ContBlock = nullptr; 2488 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2489 if (!CondConstant) 2490 return; 2491 } else { 2492 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2493 ContBlock = CGF.createBasicBlock("simd.if.end"); 2494 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2495 CGF.getProfileCount(&S)); 2496 CGF.EmitBlock(ThenBlock); 2497 CGF.incrementProfileCounter(&S); 2498 } 2499 2500 // Emit the loop iteration variable. 2501 const Expr *IVExpr = S.getIterationVariable(); 2502 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2503 CGF.EmitVarDecl(*IVDecl); 2504 CGF.EmitIgnoredExpr(S.getInit()); 2505 2506 // Emit the iteration count variable. 2507 // If it is not a variable, Sema decided to calculate the iteration count on 2508 // each iteration (e.g., it is foldable into a constant). 2509 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2510 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2511 // Emit the calculation of the iteration count. 2512 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2513 } 2514 2515 emitAlignedClause(CGF, S); 2516 (void)CGF.EmitOMPLinearClauseInit(S); 2517 { 2518 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2519 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2520 CGF.EmitOMPLinearClause(S, LoopScope); 2521 CGF.EmitOMPPrivateClause(S, LoopScope); 2522 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2523 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2524 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2525 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2526 (void)LoopScope.Privatize(); 2527 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2528 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2529 2530 emitCommonSimdLoop( 2531 CGF, S, 2532 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2533 CGF.EmitOMPSimdInit(S); 2534 }, 2535 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2536 CGF.EmitOMPInnerLoop( 2537 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2538 [&S](CodeGenFunction &CGF) { 2539 emitOMPLoopBodyWithStopPoint(CGF, S, 2540 CodeGenFunction::JumpDest()); 2541 }, 2542 [](CodeGenFunction &) {}); 2543 }); 2544 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2545 // Emit final copy of the lastprivate variables at the end of loops. 2546 if (HasLastprivateClause) 2547 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2548 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2549 emitPostUpdateForReductionClause(CGF, S, 2550 [](CodeGenFunction &) { return nullptr; }); 2551 } 2552 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2553 // Emit: if (PreCond) - end. 2554 if (ContBlock) { 2555 CGF.EmitBranch(ContBlock); 2556 CGF.EmitBlock(ContBlock, true); 2557 } 2558 } 2559 2560 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2561 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2562 OMPFirstScanLoop = true; 2563 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2564 emitOMPSimdRegion(CGF, S, Action); 2565 }; 2566 { 2567 auto LPCRegion = 2568 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2569 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2570 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2571 } 2572 // Check for outer lastprivate conditional update.
2573 checkForLastprivateConditionalUpdate(*this, S); 2574 } 2575 2576 void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) { 2577 // Emit the de-sugared statement. 2578 OMPTransformDirectiveScopeRAII TileScope(*this, &S); 2579 EmitStmt(S.getTransformedStmt()); 2580 } 2581 2582 void CodeGenFunction::EmitOMPOuterLoop( 2583 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2584 CodeGenFunction::OMPPrivateScope &LoopScope, 2585 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2586 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2587 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2588 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2589 2590 const Expr *IVExpr = S.getIterationVariable(); 2591 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2592 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2593 2594 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2595 2596 // Start the loop with a block that tests the condition. 2597 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2598 EmitBlock(CondBlock); 2599 const SourceRange R = S.getSourceRange(); 2600 OMPLoopNestStack.clear(); 2601 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2602 SourceLocToDebugLoc(R.getEnd())); 2603 2604 llvm::Value *BoolCondVal = nullptr; 2605 if (!DynamicOrOrdered) { 2606 // UB = min(UB, GlobalUB) or 2607 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2608 // 'distribute parallel for') 2609 EmitIgnoredExpr(LoopArgs.EUB); 2610 // IV = LB 2611 EmitIgnoredExpr(LoopArgs.Init); 2612 // IV < UB 2613 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2614 } else { 2615 BoolCondVal = 2616 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2617 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2618 } 2619 2620 // If there are any cleanups between here and the loop-exit scope, 2621 // create a block to stage a loop exit along. 2622 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2623 if (LoopScope.requiresCleanups()) 2624 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2625 2626 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2627 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2628 if (ExitBlock != LoopExit.getBlock()) { 2629 EmitBlock(ExitBlock); 2630 EmitBranchThroughCleanup(LoopExit); 2631 } 2632 EmitBlock(LoopBody); 2633 2634 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2635 // LB for loop condition and emitted it above). 2636 if (DynamicOrOrdered) 2637 EmitIgnoredExpr(LoopArgs.Init); 2638 2639 // Create a block for the increment. 2640 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2641 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2642 2643 emitCommonSimdLoop( 2644 *this, S, 2645 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2646 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2647 // with dynamic/guided scheduling and without ordered clause. 
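// (Illustration: for '#pragma omp for schedule(dynamic)' without an ordered
// clause, IsMonotonic is typically false here, so the loop's memory accesses
// are marked parallel below.)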
2648 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2649 CGF.LoopStack.setParallel(!IsMonotonic); 2650 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2651 if (C->getKind() == OMPC_ORDER_concurrent) 2652 CGF.LoopStack.setParallel(/*Enable=*/true); 2653 } else { 2654 CGF.EmitOMPSimdInit(S, IsMonotonic); 2655 } 2656 }, 2657 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2658 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2659 SourceLocation Loc = S.getBeginLoc(); 2660 // when 'distribute' is not combined with a 'for': 2661 // while (idx <= UB) { BODY; ++idx; } 2662 // when 'distribute' is combined with a 'for' 2663 // (e.g. 'distribute parallel for') 2664 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2665 CGF.EmitOMPInnerLoop( 2666 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2667 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2668 CodeGenLoop(CGF, S, LoopExit); 2669 }, 2670 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2671 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2672 }); 2673 }); 2674 2675 EmitBlock(Continue.getBlock()); 2676 BreakContinueStack.pop_back(); 2677 if (!DynamicOrOrdered) { 2678 // Emit "LB = LB + Stride", "UB = UB + Stride". 2679 EmitIgnoredExpr(LoopArgs.NextLB); 2680 EmitIgnoredExpr(LoopArgs.NextUB); 2681 } 2682 2683 EmitBranch(CondBlock); 2684 OMPLoopNestStack.clear(); 2685 LoopStack.pop(); 2686 // Emit the fall-through block. 2687 EmitBlock(LoopExit.getBlock()); 2688 2689 // Tell the runtime we are done. 2690 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2691 if (!DynamicOrOrdered) 2692 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2693 S.getDirectiveKind()); 2694 }; 2695 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2696 } 2697 2698 void CodeGenFunction::EmitOMPForOuterLoop( 2699 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2700 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2701 const OMPLoopArguments &LoopArgs, 2702 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2703 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2704 2705 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2706 const bool DynamicOrOrdered = 2707 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2708 2709 assert((Ordered || 2710 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2711 LoopArgs.Chunk != nullptr)) && 2712 "static non-chunked schedule does not need outer loop"); 2713 2714 // Emit outer loop. 2715 // 2716 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2717 // When schedule(dynamic,chunk_size) is specified, the iterations are 2718 // distributed to threads in the team in chunks as the threads request them. 2719 // Each thread executes a chunk of iterations, then requests another chunk, 2720 // until no chunks remain to be distributed. Each chunk contains chunk_size 2721 // iterations, except for the last chunk to be distributed, which may have 2722 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2723 // 2724 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2725 // to threads in the team in chunks as the executing threads request them. 2726 // Each thread executes a chunk of iterations, then requests another chunk, 2727 // until no chunks remain to be assigned. 
For a chunk_size of 1, the size of 2728 // each chunk is proportional to the number of unassigned iterations divided 2729 // by the number of threads in the team, decreasing to 1. For a chunk_size 2730 // with value k (greater than 1), the size of each chunk is determined in the 2731 // same way, with the restriction that the chunks do not contain fewer than k 2732 // iterations (except for the last chunk to be assigned, which may have fewer 2733 // than k iterations). 2734 // 2735 // When schedule(auto) is specified, the decision regarding scheduling is 2736 // delegated to the compiler and/or runtime system. The programmer gives the 2737 // implementation the freedom to choose any possible mapping of iterations to 2738 // threads in the team. 2739 // 2740 // When schedule(runtime) is specified, the decision regarding scheduling is 2741 // deferred until run time, and the schedule and chunk size are taken from the 2742 // run-sched-var ICV. If the ICV is set to auto, the schedule is 2743 // implementation defined. 2744 // 2745 // while(__kmpc_dispatch_next(&LB, &UB)) { 2746 // idx = LB; 2747 // while (idx <= UB) { BODY; ++idx; 2748 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 2749 // } // inner loop 2750 // } 2751 // 2752 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2753 // When schedule(static, chunk_size) is specified, iterations are divided into 2754 // chunks of size chunk_size, and the chunks are assigned to the threads in 2755 // the team in a round-robin fashion in the order of the thread number. 2756 // 2757 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 2758 // while (idx <= UB) { BODY; ++idx; } // inner loop 2759 // LB = LB + ST; 2760 // UB = UB + ST; 2761 // } 2762 // 2763 2764 const Expr *IVExpr = S.getIterationVariable(); 2765 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2766 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2767 2768 if (DynamicOrOrdered) { 2769 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds = 2770 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 2771 llvm::Value *LBVal = DispatchBounds.first; 2772 llvm::Value *UBVal = DispatchBounds.second; 2773 CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal, 2774 LoopArgs.Chunk}; 2775 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize, 2776 IVSigned, Ordered, DispatchRTInputValues); 2777 } else { 2778 CGOpenMPRuntime::StaticRTInput StaticInit( 2779 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 2780 LoopArgs.ST, LoopArgs.Chunk); 2781 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(), 2782 ScheduleKind, StaticInit); 2783 } 2784 2785 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 2786 const unsigned IVSize, 2787 const bool IVSigned) { 2788 if (Ordered) { 2789 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 2790 IVSigned); 2791 } 2792 }; 2793 2794 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 2795 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 2796 OuterLoopArgs.IncExpr = S.getInc(); 2797 OuterLoopArgs.Init = S.getInit(); 2798 OuterLoopArgs.Cond = S.getCond(); 2799 OuterLoopArgs.NextLB = S.getNextLowerBound(); 2800 OuterLoopArgs.NextUB = S.getNextUpperBound(); 2801 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 2802 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 2803 } 2804 2805 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
2806 const unsigned IVSize, const bool IVSigned) {} 2807 2808 void CodeGenFunction::EmitOMPDistributeOuterLoop( 2809 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 2810 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 2811 const CodeGenLoopTy &CodeGenLoopContent) { 2812 2813 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2814 2815 // Emit outer loop. 2816 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be 2817 // dynamic. 2818 // 2819 2820 const Expr *IVExpr = S.getIterationVariable(); 2821 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2822 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2823 2824 CGOpenMPRuntime::StaticRTInput StaticInit( 2825 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB, 2826 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk); 2827 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit); 2828 2829 // For combined 'distribute' and 'for', the increment expression of distribute 2830 // is stored in DistInc. For 'distribute' alone, it is in Inc. 2831 Expr *IncExpr; 2832 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) 2833 IncExpr = S.getDistInc(); 2834 else 2835 IncExpr = S.getInc(); 2836 2837 // This routine is shared by 'omp distribute parallel for' and 2838 // 'omp distribute': select the right EUB expression depending on the 2839 // directive. 2840 OMPLoopArguments OuterLoopArgs; 2841 OuterLoopArgs.LB = LoopArgs.LB; 2842 OuterLoopArgs.UB = LoopArgs.UB; 2843 OuterLoopArgs.ST = LoopArgs.ST; 2844 OuterLoopArgs.IL = LoopArgs.IL; 2845 OuterLoopArgs.Chunk = LoopArgs.Chunk; 2846 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2847 ? S.getCombinedEnsureUpperBound() 2848 : S.getEnsureUpperBound(); 2849 OuterLoopArgs.IncExpr = IncExpr; 2850 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2851 ? S.getCombinedInit() 2852 : S.getInit(); 2853 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2854 ? S.getCombinedCond() 2855 : S.getCond(); 2856 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2857 ? S.getCombinedNextLowerBound() 2858 : S.getNextLowerBound(); 2859 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2860 ? S.getCombinedNextUpperBound() 2861 : S.getNextUpperBound(); 2862 2863 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S, 2864 LoopScope, OuterLoopArgs, CodeGenLoopContent, 2865 emitEmptyOrdered); 2866 } 2867 2868 static std::pair<LValue, LValue> 2869 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, 2870 const OMPExecutableDirective &S) { 2871 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2872 LValue LB = 2873 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2874 LValue UB = 2875 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2876 2877 // When composing 'distribute' with 'for' (e.g. as in 'distribute 2878 // parallel for') we need to use the 'distribute' 2879 // chunk lower and upper bounds rather than the whole loop iteration 2880 // space. These are parameters to the outlined function for 'parallel' 2881 // and we copy the bounds of the previous schedule into 2882 // the current ones.
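// (Illustration: under '#pragma omp distribute parallel for', if a team is
// assigned the distribute chunk [128, 255], its inner 'for' schedules only
// that range; PrevLB/PrevUB carry those bounds into this directive's LB/UB
// below.)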
2883   LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2884   LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2885   llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2886       PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2887   PrevLBVal = CGF.EmitScalarConversion(
2888       PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2889       LS.getIterationVariable()->getType(),
2890       LS.getPrevLowerBoundVariable()->getExprLoc());
2891   llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2892       PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2893   PrevUBVal = CGF.EmitScalarConversion(
2894       PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2895       LS.getIterationVariable()->getType(),
2896       LS.getPrevUpperBoundVariable()->getExprLoc());
2897 
2898   CGF.EmitStoreOfScalar(PrevLBVal, LB);
2899   CGF.EmitStoreOfScalar(PrevUBVal, UB);
2900 
2901   return {LB, UB};
2902 }
2903 
2904 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
2905 /// we need to use the LB and UB expressions generated by the worksharing
2906 /// code generation support, whereas in non-combined situations we would
2907 /// just emit 0 and the LastIteration expression.
2908 /// This function is necessary due to the difference of the LB and UB
2909 /// types for the RT emission routines for 'for_static_init' and
2910 /// 'for_dispatch_init'.
2911 static std::pair<llvm::Value *, llvm::Value *>
2912 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2913                                         const OMPExecutableDirective &S,
2914                                         Address LB, Address UB) {
2915   const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2916   const Expr *IVExpr = LS.getIterationVariable();
2917   // When implementing a dynamic schedule for a 'for' combined with a
2918   // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2919   // is not normalized as each team only executes its own assigned
2920   // distribute chunk.
2921   QualType IteratorTy = IVExpr->getType();
2922   llvm::Value *LBVal =
2923       CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2924   llvm::Value *UBVal =
2925       CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2926   return {LBVal, UBVal};
2927 }
2928 
2929 static void emitDistributeParallelForDistributeInnerBoundParams(
2930     CodeGenFunction &CGF, const OMPExecutableDirective &S,
2931     llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2932   const auto &Dir = cast<OMPLoopDirective>(S);
2933   LValue LB =
2934       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2935   llvm::Value *LBCast =
2936       CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
2937                                 CGF.SizeTy, /*isSigned=*/false);
2938   CapturedVars.push_back(LBCast);
2939   LValue UB =
2940       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2941 
2942   llvm::Value *UBCast =
2943       CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
2944                                 CGF.SizeTy, /*isSigned=*/false);
2945   CapturedVars.push_back(UBCast);
2946 }
2947 
2948 static void
2949 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2950                                  const OMPLoopDirective &S,
2951                                  CodeGenFunction::JumpDest LoopExit) {
2952   auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2953                                          PrePostActionTy &Action) {
2954     Action.Enter(CGF);
2955     bool HasCancel = false;
2956     if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2957       if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2958         HasCancel = D->hasCancel();
2959       else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2960         HasCancel =
D->hasCancel(); 2961 else if (const auto *D = 2962 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2963 HasCancel = D->hasCancel(); 2964 } 2965 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2966 HasCancel); 2967 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2968 emitDistributeParallelForInnerBounds, 2969 emitDistributeParallelForDispatchBounds); 2970 }; 2971 2972 emitCommonOMPParallelDirective( 2973 CGF, S, 2974 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2975 CGInlinedWorksharingLoop, 2976 emitDistributeParallelForDistributeInnerBoundParams); 2977 } 2978 2979 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2980 const OMPDistributeParallelForDirective &S) { 2981 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2982 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2983 S.getDistInc()); 2984 }; 2985 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2986 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2987 } 2988 2989 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2990 const OMPDistributeParallelForSimdDirective &S) { 2991 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2992 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2993 S.getDistInc()); 2994 }; 2995 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2996 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2997 } 2998 2999 void CodeGenFunction::EmitOMPDistributeSimdDirective( 3000 const OMPDistributeSimdDirective &S) { 3001 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3002 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 3003 }; 3004 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3005 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3006 } 3007 3008 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 3009 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 3010 // Emit SPMD target parallel for region as a standalone region. 3011 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3012 emitOMPSimdRegion(CGF, S, Action); 3013 }; 3014 llvm::Function *Fn; 3015 llvm::Constant *Addr; 3016 // Emit target region as a standalone region. 3017 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 3018 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 3019 assert(Fn && Addr && "Target device function emission failed."); 3020 } 3021 3022 void CodeGenFunction::EmitOMPTargetSimdDirective( 3023 const OMPTargetSimdDirective &S) { 3024 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3025 emitOMPSimdRegion(CGF, S, Action); 3026 }; 3027 emitCommonOMPTargetDirective(*this, S, CodeGen); 3028 } 3029 3030 namespace { 3031 struct ScheduleKindModifiersTy { 3032 OpenMPScheduleClauseKind Kind; 3033 OpenMPScheduleClauseModifier M1; 3034 OpenMPScheduleClauseModifier M2; 3035 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 3036 OpenMPScheduleClauseModifier M1, 3037 OpenMPScheduleClauseModifier M2) 3038 : Kind(Kind), M1(M1), M2(M2) {} 3039 }; 3040 } // namespace 3041 3042 bool CodeGenFunction::EmitOMPWorksharingLoop( 3043 const OMPLoopDirective &S, Expr *EUB, 3044 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 3045 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 3046 // Emit the loop iteration variable. 
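  // (Illustrative note, not emitted code: Sema pre-builds a normalized
  // iteration variable over 0..NumIterations-1 for a source loop such as
  //   #pragma omp for
  //   for (int i = lo; i < hi; i += step) ...
  // and the original counter 'i' is recomputed from it inside the loop body.)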
3047 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3048 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3049 EmitVarDecl(*IVDecl); 3050 3051 // Emit the iterations count variable. 3052 // If it is not a variable, Sema decided to calculate iterations count on each 3053 // iteration (e.g., it is foldable into a constant). 3054 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3055 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3056 // Emit calculation of the iterations count. 3057 EmitIgnoredExpr(S.getCalcLastIteration()); 3058 } 3059 3060 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 3061 3062 bool HasLastprivateClause; 3063 // Check pre-condition. 3064 { 3065 OMPLoopScope PreInitScope(*this, S); 3066 // Skip the entire loop if we don't meet the precondition. 3067 // If the condition constant folds and can be elided, avoid emitting the 3068 // whole loop. 3069 bool CondConstant; 3070 llvm::BasicBlock *ContBlock = nullptr; 3071 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3072 if (!CondConstant) 3073 return false; 3074 } else { 3075 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 3076 ContBlock = createBasicBlock("omp.precond.end"); 3077 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3078 getProfileCount(&S)); 3079 EmitBlock(ThenBlock); 3080 incrementProfileCounter(&S); 3081 } 3082 3083 RunCleanupsScope DoacrossCleanupScope(*this); 3084 bool Ordered = false; 3085 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 3086 if (OrderedClause->getNumForLoops()) 3087 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 3088 else 3089 Ordered = true; 3090 } 3091 3092 llvm::DenseSet<const Expr *> EmittedFinals; 3093 emitAlignedClause(*this, S); 3094 bool HasLinears = EmitOMPLinearClauseInit(S); 3095 // Emit helper vars inits. 3096 3097 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 3098 LValue LB = Bounds.first; 3099 LValue UB = Bounds.second; 3100 LValue ST = 3101 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3102 LValue IL = 3103 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3104 3105 // Emit 'then' code. 3106 { 3107 OMPPrivateScope LoopScope(*this); 3108 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 3109 // Emit implicit barrier to synchronize threads and avoid data races on 3110 // initialization of firstprivate variables and post-update of 3111 // lastprivate variables. 3112 CGM.getOpenMPRuntime().emitBarrierCall( 3113 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3114 /*ForceSimpleCall=*/true); 3115 } 3116 EmitOMPPrivateClause(S, LoopScope); 3117 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 3118 *this, S, EmitLValue(S.getIterationVariable())); 3119 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3120 EmitOMPReductionClauseInit(S, LoopScope); 3121 EmitOMPPrivateLoopCounters(S, LoopScope); 3122 EmitOMPLinearClause(S, LoopScope); 3123 (void)LoopScope.Privatize(); 3124 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3125 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3126 3127 // Detect the loop schedule kind and chunk. 
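    // For example (illustrative only): for '#pragma omp for schedule(dynamic, 4)'
    // the OMPScheduleClause below yields Schedule == OMPC_SCHEDULE_dynamic and
    // ChunkExpr == '4'; without a schedule clause the runtime default is used.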
3128 const Expr *ChunkExpr = nullptr; 3129 OpenMPScheduleTy ScheduleKind; 3130 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 3131 ScheduleKind.Schedule = C->getScheduleKind(); 3132 ScheduleKind.M1 = C->getFirstScheduleModifier(); 3133 ScheduleKind.M2 = C->getSecondScheduleModifier(); 3134 ChunkExpr = C->getChunkSize(); 3135 } else { 3136 // Default behaviour for schedule clause. 3137 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 3138 *this, S, ScheduleKind.Schedule, ChunkExpr); 3139 } 3140 bool HasChunkSizeOne = false; 3141 llvm::Value *Chunk = nullptr; 3142 if (ChunkExpr) { 3143 Chunk = EmitScalarExpr(ChunkExpr); 3144 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 3145 S.getIterationVariable()->getType(), 3146 S.getBeginLoc()); 3147 Expr::EvalResult Result; 3148 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 3149 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 3150 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 3151 } 3152 } 3153 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3154 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3155 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 3156 // If the static schedule kind is specified or if the ordered clause is 3157 // specified, and if no monotonic modifier is specified, the effect will 3158 // be as if the monotonic modifier was specified. 3159 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 3160 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 3161 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3162 bool IsMonotonic = 3163 Ordered || 3164 ((ScheduleKind.Schedule == OMPC_SCHEDULE_static || 3165 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) && 3166 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic || 3167 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) || 3168 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 3169 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 3170 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 3171 /* Chunked */ Chunk != nullptr) || 3172 StaticChunkedOne) && 3173 !Ordered) { 3174 JumpDest LoopExit = 3175 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3176 emitCommonSimdLoop( 3177 *this, S, 3178 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 3179 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3180 CGF.EmitOMPSimdInit(S, IsMonotonic); 3181 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 3182 if (C->getKind() == OMPC_ORDER_concurrent) 3183 CGF.LoopStack.setParallel(/*Enable=*/true); 3184 } 3185 }, 3186 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 3187 &S, ScheduleKind, LoopExit, 3188 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 3189 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 3190 // When no chunk_size is specified, the iteration space is divided 3191 // into chunks that are approximately equal in size, and at most 3192 // one chunk is distributed to each thread. Note that the size of 3193 // the chunks is unspecified in this case. 3194 CGOpenMPRuntime::StaticRTInput StaticInit( 3195 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 3196 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 3197 StaticChunkedOne ? 
Chunk : nullptr); 3198 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3199 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 3200 StaticInit); 3201 // UB = min(UB, GlobalUB); 3202 if (!StaticChunkedOne) 3203 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 3204 // IV = LB; 3205 CGF.EmitIgnoredExpr(S.getInit()); 3206 // For unchunked static schedule generate: 3207 // 3208 // while (idx <= UB) { 3209 // BODY; 3210 // ++idx; 3211 // } 3212 // 3213 // For static schedule with chunk one: 3214 // 3215 // while (IV <= PrevUB) { 3216 // BODY; 3217 // IV += ST; 3218 // } 3219 CGF.EmitOMPInnerLoop( 3220 S, LoopScope.requiresCleanups(), 3221 StaticChunkedOne ? S.getCombinedParForInDistCond() 3222 : S.getCond(), 3223 StaticChunkedOne ? S.getDistInc() : S.getInc(), 3224 [&S, LoopExit](CodeGenFunction &CGF) { 3225 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 3226 }, 3227 [](CodeGenFunction &) {}); 3228 }); 3229 EmitBlock(LoopExit.getBlock()); 3230 // Tell the runtime we are done. 3231 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3232 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3233 S.getDirectiveKind()); 3234 }; 3235 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 3236 } else { 3237 // Emit the outer loop, which requests its work chunk [LB..UB] from 3238 // runtime and runs the inner loop to process it. 3239 const OMPLoopArguments LoopArguments( 3240 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3241 IL.getAddress(*this), Chunk, EUB); 3242 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 3243 LoopArguments, CGDispatchBounds); 3244 } 3245 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3246 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 3247 return CGF.Builder.CreateIsNotNull( 3248 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3249 }); 3250 } 3251 EmitOMPReductionClauseFinal( 3252 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 3253 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 3254 : /*Parallel only*/ OMPD_parallel); 3255 // Emit post-update of the reduction variables if IsLastIter != 0. 3256 emitPostUpdateForReductionClause( 3257 *this, S, [IL, &S](CodeGenFunction &CGF) { 3258 return CGF.Builder.CreateIsNotNull( 3259 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3260 }); 3261 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3262 if (HasLastprivateClause) 3263 EmitOMPLastprivateClauseFinal( 3264 S, isOpenMPSimdDirective(S.getDirectiveKind()), 3265 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 3266 } 3267 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 3268 return CGF.Builder.CreateIsNotNull( 3269 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3270 }); 3271 DoacrossCleanupScope.ForceCleanup(); 3272 // We're now done with the loop, so jump to the continuation block. 3273 if (ContBlock) { 3274 EmitBranch(ContBlock); 3275 EmitBlock(ContBlock, /*IsFinished=*/true); 3276 } 3277 } 3278 return HasLastprivateClause; 3279 } 3280 3281 /// The following two functions generate expressions for the loop lower 3282 /// and upper bounds in case of static and dynamic (dispatch) schedule 3283 /// of the associated 'for' or 'distribute' loop. 
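/// For instance (an illustrative sketch, not emitted code), given
/// \code
/// #pragma omp for schedule(static)
/// for (int i = 0; i < n; ++i) ...
/// \endcode
/// the static path below materializes the '.omp.lb' and '.omp.ub' helper
/// variables built by Sema, while the dispatch path passes the constants 0
/// and 'LastIteration' directly to the runtime dispatch initialization.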
3284 static std::pair<LValue, LValue>
3285 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3286   const auto &LS = cast<OMPLoopDirective>(S);
3287   LValue LB =
3288       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3289   LValue UB =
3290       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3291   return {LB, UB};
3292 }
3293 
3294 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3295 /// consider the lower and upper bound expressions generated by the
3296 /// worksharing loop support, but we use 0 and the iteration space size as
3297 /// constants.
3298 static std::pair<llvm::Value *, llvm::Value *>
3299 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3300                           Address LB, Address UB) {
3301   const auto &LS = cast<OMPLoopDirective>(S);
3302   const Expr *IVExpr = LS.getIterationVariable();
3303   const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3304   llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3305   llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3306   return {LBVal, UBVal};
3307 }
3308 
3309 /// Emits internal temp array declarations for the directive with inscan
3310 /// reductions.
3311 /// The code is the following:
3312 /// \code
3313 /// size num_iters = <num_iters>;
3314 /// <type> buffer[num_iters];
3315 /// \endcode
3316 static void emitScanBasedDirectiveDecls(
3317     CodeGenFunction &CGF, const OMPLoopDirective &S,
3318     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3319   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3320       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3321   SmallVector<const Expr *, 4> Shareds;
3322   SmallVector<const Expr *, 4> Privates;
3323   SmallVector<const Expr *, 4> ReductionOps;
3324   SmallVector<const Expr *, 4> CopyArrayTemps;
3325   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3326     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3327            "Only inscan reductions are expected.");
3328     Shareds.append(C->varlist_begin(), C->varlist_end());
3329     Privates.append(C->privates().begin(), C->privates().end());
3330     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3331     CopyArrayTemps.append(C->copy_array_temps().begin(),
3332                           C->copy_array_temps().end());
3333   }
3334   {
3335     // Emit buffers for each reduction variable.
3336     // ReductionCodeGen is required to correctly emit the code for array
3337     // reductions.
3338     ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3339     unsigned Count = 0;
3340     auto *ITA = CopyArrayTemps.begin();
3341     for (const Expr *IRef : Privates) {
3342       const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3343       // Emit variably modified arrays, used for arrays/array sections
3344       // reductions.
3345       if (PrivateVD->getType()->isVariablyModifiedType()) {
3346         RedCG.emitSharedOrigLValue(CGF, Count);
3347         RedCG.emitAggregateType(CGF, Count);
3348       }
3349       CodeGenFunction::OpaqueValueMapping DimMapping(
3350           CGF,
3351           cast<OpaqueValueExpr>(
3352               cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3353                   ->getSizeExpr()),
3354           RValue::get(OMPScanNumIterations));
3355       // Emit temp buffer.
3356       CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3357       ++ITA;
3358       ++Count;
3359     }
3360   }
3361 }
3362 
3363 /// Emits the code for the directive with inscan reductions.
3364 /// The code is the following:
3365 /// \code
3366 /// #pragma omp ...
3367 /// for (i: 0..<num_iters>) {
3368 ///   <input phase>;
3369 ///   buffer[i] = red;
3370 /// }
3371 /// #pragma omp master // in parallel region
3372 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3373 ///   for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3374 ///     buffer[cnt] op= buffer[cnt-pow(2,k)];
3375 /// #pragma omp barrier // in parallel region
3376 /// #pragma omp ...
3377 /// for (0..<num_iters>) {
3378 ///   red = InclusiveScan ? buffer[i] : buffer[i-1];
3379 ///   <scan phase>;
3380 /// }
3381 /// \endcode
3382 static void emitScanBasedDirective(
3383     CodeGenFunction &CGF, const OMPLoopDirective &S,
3384     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3385     llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3386     llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3387   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3388       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3389   SmallVector<const Expr *, 4> Privates;
3390   SmallVector<const Expr *, 4> ReductionOps;
3391   SmallVector<const Expr *, 4> LHSs;
3392   SmallVector<const Expr *, 4> RHSs;
3393   SmallVector<const Expr *, 4> CopyArrayElems;
3394   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3395     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3396            "Only inscan reductions are expected.");
3397     Privates.append(C->privates().begin(), C->privates().end());
3398     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3399     LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3400     RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3401     CopyArrayElems.append(C->copy_array_elems().begin(),
3402                           C->copy_array_elems().end());
3403   }
3404   CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3405   {
3406     // Emit loop with input phase:
3407     // #pragma omp ...
3408 // for (i: 0..<num_iters>) { 3409 // <input phase>; 3410 // buffer[i] = red; 3411 // } 3412 CGF.OMPFirstScanLoop = true; 3413 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3414 FirstGen(CGF); 3415 } 3416 // #pragma omp barrier // in parallel region 3417 auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems, 3418 &ReductionOps, 3419 &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) { 3420 Action.Enter(CGF); 3421 // Emit prefix reduction: 3422 // #pragma omp master // in parallel region 3423 // for (int k = 0; k <= ceil(log2(n)); ++k) 3424 llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock(); 3425 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body"); 3426 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit"); 3427 llvm::Function *F = 3428 CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy); 3429 llvm::Value *Arg = 3430 CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy); 3431 llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg); 3432 F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy); 3433 LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal); 3434 LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy); 3435 llvm::Value *NMin1 = CGF.Builder.CreateNUWSub( 3436 OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3437 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc()); 3438 CGF.EmitBlock(LoopBB); 3439 auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2); 3440 // size pow2k = 1; 3441 auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3442 Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB); 3443 Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB); 3444 // for (size i = n - 1; i >= 2 ^ k; --i) 3445 // tmp[i] op= tmp[i-pow2k]; 3446 llvm::BasicBlock *InnerLoopBB = 3447 CGF.createBasicBlock("omp.inner.log.scan.body"); 3448 llvm::BasicBlock *InnerExitBB = 3449 CGF.createBasicBlock("omp.inner.log.scan.exit"); 3450 llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K); 3451 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3452 CGF.EmitBlock(InnerLoopBB); 3453 auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3454 IVal->addIncoming(NMin1, LoopBB); 3455 { 3456 CodeGenFunction::OMPPrivateScope PrivScope(CGF); 3457 auto *ILHS = LHSs.begin(); 3458 auto *IRHS = RHSs.begin(); 3459 for (const Expr *CopyArrayElem : CopyArrayElems) { 3460 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 3461 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 3462 Address LHSAddr = Address::invalid(); 3463 { 3464 CodeGenFunction::OpaqueValueMapping IdxMapping( 3465 CGF, 3466 cast<OpaqueValueExpr>( 3467 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3468 RValue::get(IVal)); 3469 LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3470 } 3471 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3472 Address RHSAddr = Address::invalid(); 3473 { 3474 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3475 CodeGenFunction::OpaqueValueMapping IdxMapping( 3476 CGF, 3477 cast<OpaqueValueExpr>( 3478 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3479 RValue::get(OffsetIVal)); 3480 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3481 } 3482 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3483 ++ILHS; 3484 ++IRHS; 3485 } 3486 PrivScope.Privatize(); 3487 CGF.CGM.getOpenMPRuntime().emitReduction( 3488 CGF, S.getEndLoc(), Privates, LHSs, RHSs, 
ReductionOps, 3489 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3490 } 3491 llvm::Value *NextIVal = 3492 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3493 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3494 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3495 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3496 CGF.EmitBlock(InnerExitBB); 3497 llvm::Value *Next = 3498 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3499 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3500 // pow2k <<= 1; 3501 llvm::Value *NextPow2K = 3502 CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3503 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3504 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3505 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3506 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3507 CGF.EmitBlock(ExitBB); 3508 }; 3509 if (isOpenMPParallelDirective(S.getDirectiveKind())) { 3510 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3511 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3512 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3513 /*ForceSimpleCall=*/true); 3514 } else { 3515 RegionCodeGenTy RCG(CodeGen); 3516 RCG(CGF); 3517 } 3518 3519 CGF.OMPFirstScanLoop = false; 3520 SecondGen(CGF); 3521 } 3522 3523 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3524 const OMPLoopDirective &S, 3525 bool HasCancel) { 3526 bool HasLastprivates; 3527 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3528 [](const OMPReductionClause *C) { 3529 return C->getModifier() == OMPC_REDUCTION_inscan; 3530 })) { 3531 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3532 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3533 OMPLoopScope LoopScope(CGF, S); 3534 return CGF.EmitScalarExpr(S.getNumIterations()); 3535 }; 3536 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3537 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3538 CGF, S.getDirectiveKind(), HasCancel); 3539 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3540 emitForLoopBounds, 3541 emitDispatchForLoopBounds); 3542 // Emit an implicit barrier at the end. 
3543 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3544 OMPD_for); 3545 }; 3546 const auto &&SecondGen = [&S, HasCancel, 3547 &HasLastprivates](CodeGenFunction &CGF) { 3548 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3549 CGF, S.getDirectiveKind(), HasCancel); 3550 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3551 emitForLoopBounds, 3552 emitDispatchForLoopBounds); 3553 }; 3554 if (!isOpenMPParallelDirective(S.getDirectiveKind())) 3555 emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen); 3556 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3557 } else { 3558 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3559 HasCancel); 3560 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3561 emitForLoopBounds, 3562 emitDispatchForLoopBounds); 3563 } 3564 return HasLastprivates; 3565 } 3566 3567 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) { 3568 if (S.hasCancel()) 3569 return false; 3570 for (OMPClause *C : S.clauses()) 3571 if (!isa<OMPNowaitClause>(C)) 3572 return false; 3573 3574 return true; 3575 } 3576 3577 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3578 bool HasLastprivates = false; 3579 bool UseOMPIRBuilder = 3580 CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S); 3581 auto &&CodeGen = [this, &S, &HasLastprivates, 3582 UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) { 3583 // Use the OpenMPIRBuilder if enabled. 3584 if (UseOMPIRBuilder) { 3585 // Emit the associated statement and get its loop representation. 3586 const Stmt *Inner = S.getRawStmt(); 3587 llvm::CanonicalLoopInfo *CLI = 3588 EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 3589 3590 bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>(); 3591 llvm::OpenMPIRBuilder &OMPBuilder = 3592 CGM.getOpenMPRuntime().getOMPBuilder(); 3593 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3594 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3595 OMPBuilder.createWorkshareLoop(Builder, CLI, AllocaIP, NeedsBarrier); 3596 return; 3597 } 3598 3599 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3600 }; 3601 { 3602 auto LPCRegion = 3603 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3604 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3605 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3606 S.hasCancel()); 3607 } 3608 3609 if (!UseOMPIRBuilder) { 3610 // Emit an implicit barrier at the end. 3611 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3612 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3613 } 3614 // Check for outer lastprivate conditional update. 3615 checkForLastprivateConditionalUpdate(*this, S); 3616 } 3617 3618 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3619 bool HasLastprivates = false; 3620 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3621 PrePostActionTy &) { 3622 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3623 }; 3624 { 3625 auto LPCRegion = 3626 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3627 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3628 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3629 } 3630 3631 // Emit an implicit barrier at the end. 
3632 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3633 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3634 // Check for outer lastprivate conditional update. 3635 checkForLastprivateConditionalUpdate(*this, S); 3636 } 3637 3638 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3639 const Twine &Name, 3640 llvm::Value *Init = nullptr) { 3641 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3642 if (Init) 3643 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3644 return LVal; 3645 } 3646 3647 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3648 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3649 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3650 bool HasLastprivates = false; 3651 auto &&CodeGen = [&S, CapturedStmt, CS, 3652 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3653 const ASTContext &C = CGF.getContext(); 3654 QualType KmpInt32Ty = 3655 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3656 // Emit helper vars inits. 3657 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3658 CGF.Builder.getInt32(0)); 3659 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3660 ? CGF.Builder.getInt32(CS->size() - 1) 3661 : CGF.Builder.getInt32(0); 3662 LValue UB = 3663 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3664 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3665 CGF.Builder.getInt32(1)); 3666 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3667 CGF.Builder.getInt32(0)); 3668 // Loop counter. 3669 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3670 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3671 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3672 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3673 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3674 // Generate condition for loop. 3675 BinaryOperator *Cond = BinaryOperator::Create( 3676 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary, 3677 S.getBeginLoc(), FPOptionsOverride()); 3678 // Increment for loop counter. 3679 UnaryOperator *Inc = UnaryOperator::Create( 3680 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 3681 S.getBeginLoc(), true, FPOptionsOverride()); 3682 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3683 // Iterate through all sections and emit a switch construct: 3684 // switch (IV) { 3685 // case 0: 3686 // <SectionStmt[0]>; 3687 // break; 3688 // ... 3689 // case <NumSection> - 1: 3690 // <SectionStmt[<NumSection> - 1]>; 3691 // break; 3692 // } 3693 // .omp.sections.exit: 3694 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3695 llvm::SwitchInst *SwitchStmt = 3696 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3697 ExitBB, CS == nullptr ? 
1 : CS->size()); 3698 if (CS) { 3699 unsigned CaseNumber = 0; 3700 for (const Stmt *SubStmt : CS->children()) { 3701 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3702 CGF.EmitBlock(CaseBB); 3703 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3704 CGF.EmitStmt(SubStmt); 3705 CGF.EmitBranch(ExitBB); 3706 ++CaseNumber; 3707 } 3708 } else { 3709 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3710 CGF.EmitBlock(CaseBB); 3711 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3712 CGF.EmitStmt(CapturedStmt); 3713 CGF.EmitBranch(ExitBB); 3714 } 3715 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3716 }; 3717 3718 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3719 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3720 // Emit implicit barrier to synchronize threads and avoid data races on 3721 // initialization of firstprivate variables and post-update of lastprivate 3722 // variables. 3723 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3724 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3725 /*ForceSimpleCall=*/true); 3726 } 3727 CGF.EmitOMPPrivateClause(S, LoopScope); 3728 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3729 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3730 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3731 (void)LoopScope.Privatize(); 3732 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3733 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3734 3735 // Emit static non-chunked loop. 3736 OpenMPScheduleTy ScheduleKind; 3737 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3738 CGOpenMPRuntime::StaticRTInput StaticInit( 3739 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3740 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3741 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3742 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3743 // UB = min(UB, GlobalUB); 3744 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3745 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3746 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3747 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3748 // IV = LB; 3749 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3750 // while (idx <= UB) { BODY; ++idx; } 3751 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3752 [](CodeGenFunction &) {}); 3753 // Tell the runtime we are done. 3754 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3755 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3756 S.getDirectiveKind()); 3757 }; 3758 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3759 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3760 // Emit post-update of the reduction variables if IsLastIter != 0. 3761 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3762 return CGF.Builder.CreateIsNotNull( 3763 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3764 }); 3765 3766 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
3767 if (HasLastprivates) 3768 CGF.EmitOMPLastprivateClauseFinal( 3769 S, /*NoFinals=*/false, 3770 CGF.Builder.CreateIsNotNull( 3771 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3772 }; 3773 3774 bool HasCancel = false; 3775 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3776 HasCancel = OSD->hasCancel(); 3777 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3778 HasCancel = OPSD->hasCancel(); 3779 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3780 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3781 HasCancel); 3782 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3783 // clause. Otherwise the barrier will be generated by the codegen for the 3784 // directive. 3785 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3786 // Emit implicit barrier to synchronize threads and avoid data races on 3787 // initialization of firstprivate variables. 3788 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3789 OMPD_unknown); 3790 } 3791 } 3792 3793 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3794 { 3795 auto LPCRegion = 3796 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3797 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3798 EmitSections(S); 3799 } 3800 // Emit an implicit barrier at the end. 3801 if (!S.getSingleClause<OMPNowaitClause>()) { 3802 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3803 OMPD_sections); 3804 } 3805 // Check for outer lastprivate conditional update. 3806 checkForLastprivateConditionalUpdate(*this, S); 3807 } 3808 3809 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3810 LexicalScope Scope(*this, S.getSourceRange()); 3811 EmitStopPoint(&S); 3812 EmitStmt(S.getAssociatedStmt()); 3813 } 3814 3815 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3816 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3817 llvm::SmallVector<const Expr *, 8> DestExprs; 3818 llvm::SmallVector<const Expr *, 8> SrcExprs; 3819 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3820 // Check if there are any 'copyprivate' clauses associated with this 3821 // 'single' construct. 
3822 // Build a list of copyprivate variables along with helper expressions 3823 // (<source>, <destination>, <destination>=<source> expressions) 3824 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3825 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3826 DestExprs.append(C->destination_exprs().begin(), 3827 C->destination_exprs().end()); 3828 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3829 AssignmentOps.append(C->assignment_ops().begin(), 3830 C->assignment_ops().end()); 3831 } 3832 // Emit code for 'single' region along with 'copyprivate' clauses 3833 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3834 Action.Enter(CGF); 3835 OMPPrivateScope SingleScope(CGF); 3836 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3837 CGF.EmitOMPPrivateClause(S, SingleScope); 3838 (void)SingleScope.Privatize(); 3839 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3840 }; 3841 { 3842 auto LPCRegion = 3843 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3844 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3845 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3846 CopyprivateVars, DestExprs, 3847 SrcExprs, AssignmentOps); 3848 } 3849 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3850 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3851 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3852 CGM.getOpenMPRuntime().emitBarrierCall( 3853 *this, S.getBeginLoc(), 3854 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3855 } 3856 // Check for outer lastprivate conditional update. 3857 checkForLastprivateConditionalUpdate(*this, S); 3858 } 3859 3860 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3861 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3862 Action.Enter(CGF); 3863 CGF.EmitStmt(S.getRawStmt()); 3864 }; 3865 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3866 } 3867 3868 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3869 if (CGM.getLangOpts().OpenMPIRBuilder) { 3870 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3871 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3872 3873 const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt(); 3874 3875 auto FiniCB = [this](InsertPointTy IP) { 3876 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3877 }; 3878 3879 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3880 InsertPointTy CodeGenIP, 3881 llvm::BasicBlock &FiniBB) { 3882 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3883 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3884 CodeGenIP, FiniBB); 3885 }; 3886 3887 LexicalScope Scope(*this, S.getSourceRange()); 3888 EmitStopPoint(&S); 3889 Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB)); 3890 3891 return; 3892 } 3893 LexicalScope Scope(*this, S.getSourceRange()); 3894 EmitStopPoint(&S); 3895 emitMaster(*this, S); 3896 } 3897 3898 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3899 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3900 Action.Enter(CGF); 3901 CGF.EmitStmt(S.getRawStmt()); 3902 }; 3903 Expr *Filter = nullptr; 3904 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3905 Filter = 
FilterClause->getThreadID(); 3906 CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(), 3907 Filter); 3908 } 3909 3910 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) { 3911 if (CGM.getLangOpts().OpenMPIRBuilder) { 3912 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3913 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3914 3915 const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt(); 3916 const Expr *Filter = nullptr; 3917 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3918 Filter = FilterClause->getThreadID(); 3919 llvm::Value *FilterVal = Filter 3920 ? EmitScalarExpr(Filter, CGM.Int32Ty) 3921 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0); 3922 3923 auto FiniCB = [this](InsertPointTy IP) { 3924 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3925 }; 3926 3927 auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP, 3928 InsertPointTy CodeGenIP, 3929 llvm::BasicBlock &FiniBB) { 3930 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3931 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt, 3932 CodeGenIP, FiniBB); 3933 }; 3934 3935 LexicalScope Scope(*this, S.getSourceRange()); 3936 EmitStopPoint(&S); 3937 Builder.restoreIP( 3938 OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal)); 3939 3940 return; 3941 } 3942 LexicalScope Scope(*this, S.getSourceRange()); 3943 EmitStopPoint(&S); 3944 emitMasked(*this, S); 3945 } 3946 3947 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 3948 if (CGM.getLangOpts().OpenMPIRBuilder) { 3949 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3950 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3951 3952 const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt(); 3953 const Expr *Hint = nullptr; 3954 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3955 Hint = HintClause->getHint(); 3956 3957 // TODO: This is slightly different from what's currently being done in 3958 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 3959 // about typing is final. 
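    // Illustrative source form handled here (not emitted code):
    //   #pragma omp critical (name) hint(omp_sync_hint_contended)
    // The hint expression is evaluated below and forwarded to the runtime as
    // an integer argument of the critical region.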
3960 llvm::Value *HintInst = nullptr; 3961 if (Hint) 3962 HintInst = 3963 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 3964 3965 auto FiniCB = [this](InsertPointTy IP) { 3966 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3967 }; 3968 3969 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 3970 InsertPointTy CodeGenIP, 3971 llvm::BasicBlock &FiniBB) { 3972 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3973 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 3974 CodeGenIP, FiniBB); 3975 }; 3976 3977 LexicalScope Scope(*this, S.getSourceRange()); 3978 EmitStopPoint(&S); 3979 Builder.restoreIP(OMPBuilder.createCritical( 3980 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 3981 HintInst)); 3982 3983 return; 3984 } 3985 3986 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3987 Action.Enter(CGF); 3988 CGF.EmitStmt(S.getAssociatedStmt()); 3989 }; 3990 const Expr *Hint = nullptr; 3991 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3992 Hint = HintClause->getHint(); 3993 LexicalScope Scope(*this, S.getSourceRange()); 3994 EmitStopPoint(&S); 3995 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 3996 S.getDirectiveName().getAsString(), 3997 CodeGen, S.getBeginLoc(), Hint); 3998 } 3999 4000 void CodeGenFunction::EmitOMPParallelForDirective( 4001 const OMPParallelForDirective &S) { 4002 // Emit directive as a combined directive that consists of two implicit 4003 // directives: 'parallel' with 'for' directive. 4004 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4005 Action.Enter(CGF); 4006 (void)emitWorksharingDirective(CGF, S, S.hasCancel()); 4007 }; 4008 { 4009 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 4010 [](const OMPReductionClause *C) { 4011 return C->getModifier() == OMPC_REDUCTION_inscan; 4012 })) { 4013 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 4014 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 4015 CGCapturedStmtInfo CGSI(CR_OpenMP); 4016 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI); 4017 OMPLoopScope LoopScope(CGF, S); 4018 return CGF.EmitScalarExpr(S.getNumIterations()); 4019 }; 4020 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen); 4021 } 4022 auto LPCRegion = 4023 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4024 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 4025 emitEmptyBoundParameters); 4026 } 4027 // Check for outer lastprivate conditional update. 4028 checkForLastprivateConditionalUpdate(*this, S); 4029 } 4030 4031 void CodeGenFunction::EmitOMPParallelForSimdDirective( 4032 const OMPParallelForSimdDirective &S) { 4033 // Emit directive as a combined directive that consists of two implicit 4034 // directives: 'parallel' with 'for' directive. 
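  // For example (illustrative only):
  //   #pragma omp parallel for simd reduction(inscan, +: x)
  // is emitted as a 'parallel' region running the 'for simd' worksharing
  // loop; inscan reductions additionally require the temporary buffers
  // emitted by emitScanBasedDirectiveDecls below.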
4035   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4036     Action.Enter(CGF);
4037     (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
4038   };
4039   {
4040     if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4041                      [](const OMPReductionClause *C) {
4042                        return C->getModifier() == OMPC_REDUCTION_inscan;
4043                      })) {
4044       const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4045         CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4046         CGCapturedStmtInfo CGSI(CR_OpenMP);
4047         CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4048         OMPLoopScope LoopScope(CGF, S);
4049         return CGF.EmitScalarExpr(S.getNumIterations());
4050       };
4051       emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4052     }
4053     auto LPCRegion =
4054         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4055     emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
4056                                    emitEmptyBoundParameters);
4057   }
4058   // Check for outer lastprivate conditional update.
4059   checkForLastprivateConditionalUpdate(*this, S);
4060 }
4061 
4062 void CodeGenFunction::EmitOMPParallelMasterDirective(
4063     const OMPParallelMasterDirective &S) {
4064   // Emit directive as a combined directive that consists of two implicit
4065   // directives: 'parallel' with 'master' directive.
4066   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4067     Action.Enter(CGF);
4068     OMPPrivateScope PrivateScope(CGF);
4069     bool Copyins = CGF.EmitOMPCopyinClause(S);
4070     (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4071     if (Copyins) {
4072       // Emit implicit barrier to synchronize threads and avoid data races
4073       // on propagating the master thread's values of threadprivate variables
4074       // to the local instances of those variables in all other implicit threads.
4075       CGF.CGM.getOpenMPRuntime().emitBarrierCall(
4076           CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
4077           /*ForceSimpleCall=*/true);
4078     }
4079     CGF.EmitOMPPrivateClause(S, PrivateScope);
4080     CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4081     (void)PrivateScope.Privatize();
4082     emitMaster(CGF, S);
4083     CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4084   };
4085   {
4086     auto LPCRegion =
4087         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4088     emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
4089                                    emitEmptyBoundParameters);
4090     emitPostUpdateForReductionClause(*this, S,
4091                                      [](CodeGenFunction &) { return nullptr; });
4092   }
4093   // Check for outer lastprivate conditional update.
4094   checkForLastprivateConditionalUpdate(*this, S);
4095 }
4096 
4097 void CodeGenFunction::EmitOMPParallelSectionsDirective(
4098     const OMPParallelSectionsDirective &S) {
4099   // Emit directive as a combined directive that consists of two implicit
4100   // directives: 'parallel' with 'sections' directive.
4101   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4102     Action.Enter(CGF);
4103     CGF.EmitSections(S);
4104   };
4105   {
4106     auto LPCRegion =
4107         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4108     emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
4109                                    emitEmptyBoundParameters);
4110   }
4111   // Check for outer lastprivate conditional update.
4112   checkForLastprivateConditionalUpdate(*this, S);
4113 }
4114 
4115 namespace {
4116 /// Get the list of variables declared in the context of the untied tasks.
4117 class CheckVarsEscapingUntiedTaskDeclContext final
4118     : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
4119   llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
4120 
4121 public:
4122   explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
4123   virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
4124   void VisitDeclStmt(const DeclStmt *S) {
4125     if (!S)
4126       return;
4127     // Need to privatize only local vars; static locals can be processed as is.
4128     for (const Decl *D : S->decls()) {
4129       if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
4130         if (VD->hasLocalStorage())
4131           PrivateDecls.push_back(VD);
4132     }
4133   }
4134   void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
4135   void VisitCapturedStmt(const CapturedStmt *) { return; }
4136   void VisitLambdaExpr(const LambdaExpr *) { return; }
4137   void VisitBlockExpr(const BlockExpr *) { return; }
4138   void VisitStmt(const Stmt *S) {
4139     if (!S)
4140       return;
4141     for (const Stmt *Child : S->children())
4142       if (Child)
4143         Visit(Child);
4144   }
4145 
4146   /// Returns the list of privatized variables.
4147   ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
4148 };
4149 } // anonymous namespace
4150 
4151 void CodeGenFunction::EmitOMPTaskBasedDirective(
4152     const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
4153     const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
4154     OMPTaskDataTy &Data) {
4155   // Emit outlined function for task construct.
4156   const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
4157   auto I = CS->getCapturedDecl()->param_begin();
4158   auto PartId = std::next(I);
4159   auto TaskT = std::next(I, 4);
4160   // Check if the task is final.
4161   if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
4162     // If the condition constant folds and can be elided, try to avoid emitting
4163     // the condition and the dead arm of the if/else.
4164     const Expr *Cond = Clause->getCondition();
4165     bool CondConstant;
4166     if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
4167       Data.Final.setInt(CondConstant);
4168     else
4169       Data.Final.setPointer(EvaluateExprAsBool(Cond));
4170   } else {
4171     // By default the task is not final.
4172     Data.Final.setInt(/*IntVal=*/false);
4173   }
4174   // Check if the task has a 'priority' clause.
4175   if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
4176     const Expr *Prio = Clause->getPriority();
4177     Data.Priority.setInt(/*IntVal=*/true);
4178     Data.Priority.setPointer(EmitScalarConversion(
4179         EmitScalarExpr(Prio), Prio->getType(),
4180         getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
4181         Prio->getExprLoc()));
4182   }
4183   // The first function argument for tasks is a thread id, the second one is a
4184   // part id (0 for tied tasks, >=0 for untied task).
4185   llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
4186   // Get list of private variables.
4187   for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
4188     auto IRef = C->varlist_begin();
4189     for (const Expr *IInit : C->private_copies()) {
4190       const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4191       if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4192         Data.PrivateVars.push_back(*IRef);
4193         Data.PrivateCopies.push_back(IInit);
4194       }
4195       ++IRef;
4196     }
4197   }
4198   EmittedAsPrivate.clear();
4199   // Get list of firstprivate variables.
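  // For example (illustrative only), for '#pragma omp task firstprivate(a)'
  // the clause carries the original reference to 'a' together with a private
  // copy declaration and its initializer, collected below into
  // Data.FirstprivateVars, Data.FirstprivateCopies and Data.FirstprivateInits.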
4200 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4201 auto IRef = C->varlist_begin(); 4202 auto IElemInitRef = C->inits().begin(); 4203 for (const Expr *IInit : C->private_copies()) { 4204 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4205 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4206 Data.FirstprivateVars.push_back(*IRef); 4207 Data.FirstprivateCopies.push_back(IInit); 4208 Data.FirstprivateInits.push_back(*IElemInitRef); 4209 } 4210 ++IRef; 4211 ++IElemInitRef; 4212 } 4213 } 4214 // Get list of lastprivate variables (for taskloops). 4215 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 4216 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 4217 auto IRef = C->varlist_begin(); 4218 auto ID = C->destination_exprs().begin(); 4219 for (const Expr *IInit : C->private_copies()) { 4220 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4221 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4222 Data.LastprivateVars.push_back(*IRef); 4223 Data.LastprivateCopies.push_back(IInit); 4224 } 4225 LastprivateDstsOrigs.insert( 4226 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 4227 cast<DeclRefExpr>(*IRef)}); 4228 ++IRef; 4229 ++ID; 4230 } 4231 } 4232 SmallVector<const Expr *, 4> LHSs; 4233 SmallVector<const Expr *, 4> RHSs; 4234 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 4235 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4236 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4237 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4238 Data.ReductionOps.append(C->reduction_ops().begin(), 4239 C->reduction_ops().end()); 4240 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4241 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4242 } 4243 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 4244 *this, S.getBeginLoc(), LHSs, RHSs, Data); 4245 // Build list of dependences. 4246 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4247 OMPTaskDataTy::DependData &DD = 4248 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4249 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4250 } 4251 // Get list of local vars for untied tasks. 4252 if (!Data.Tied) { 4253 CheckVarsEscapingUntiedTaskDeclContext Checker; 4254 Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt()); 4255 Data.PrivateLocals.append(Checker.getPrivateDecls().begin(), 4256 Checker.getPrivateDecls().end()); 4257 } 4258 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 4259 CapturedRegion](CodeGenFunction &CGF, 4260 PrePostActionTy &Action) { 4261 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> 4262 UntiedLocalVars; 4263 // Set proper addresses for generated private copies. 4264 OMPPrivateScope Scope(CGF); 4265 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 4266 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 4267 !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) { 4268 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4269 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4270 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4271 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4272 CS->getCapturedDecl()->getParam(PrivatesParam))); 4273 // Map privates. 
4274 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4275 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4276 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4277 CallArgs.push_back(PrivatesPtr); 4278 ParamTypes.push_back(PrivatesPtr->getType()); 4279 for (const Expr *E : Data.PrivateVars) { 4280 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4281 Address PrivatePtr = CGF.CreateMemTemp( 4282 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 4283 PrivatePtrs.emplace_back(VD, PrivatePtr); 4284 CallArgs.push_back(PrivatePtr.getPointer()); 4285 ParamTypes.push_back(PrivatePtr.getType()); 4286 } 4287 for (const Expr *E : Data.FirstprivateVars) { 4288 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4289 Address PrivatePtr = 4290 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4291 ".firstpriv.ptr.addr"); 4292 PrivatePtrs.emplace_back(VD, PrivatePtr); 4293 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 4294 CallArgs.push_back(PrivatePtr.getPointer()); 4295 ParamTypes.push_back(PrivatePtr.getType()); 4296 } 4297 for (const Expr *E : Data.LastprivateVars) { 4298 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4299 Address PrivatePtr = 4300 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4301 ".lastpriv.ptr.addr"); 4302 PrivatePtrs.emplace_back(VD, PrivatePtr); 4303 CallArgs.push_back(PrivatePtr.getPointer()); 4304 ParamTypes.push_back(PrivatePtr.getType()); 4305 } 4306 for (const VarDecl *VD : Data.PrivateLocals) { 4307 QualType Ty = VD->getType().getNonReferenceType(); 4308 if (VD->getType()->isLValueReferenceType()) 4309 Ty = CGF.getContext().getPointerType(Ty); 4310 if (isAllocatableDecl(VD)) 4311 Ty = CGF.getContext().getPointerType(Ty); 4312 Address PrivatePtr = CGF.CreateMemTemp( 4313 CGF.getContext().getPointerType(Ty), ".local.ptr.addr"); 4314 UntiedLocalVars.try_emplace(VD, PrivatePtr, Address::invalid()); 4315 CallArgs.push_back(PrivatePtr.getPointer()); 4316 ParamTypes.push_back(PrivatePtr.getType()); 4317 } 4318 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4319 ParamTypes, /*isVarArg=*/false); 4320 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4321 CopyFn, CopyFnTy->getPointerTo()); 4322 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4323 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4324 for (const auto &Pair : LastprivateDstsOrigs) { 4325 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 4326 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 4327 /*RefersToEnclosingVariableOrCapture=*/ 4328 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 4329 Pair.second->getType(), VK_LValue, 4330 Pair.second->getExprLoc()); 4331 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 4332 return CGF.EmitLValue(&DRE).getAddress(CGF); 4333 }); 4334 } 4335 for (const auto &Pair : PrivatePtrs) { 4336 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4337 CGF.getContext().getDeclAlign(Pair.first)); 4338 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4339 } 4340 // Adjust mapping for internal locals by mapping actual memory instead of 4341 // a pointer to this memory. 
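      // Note: for an allocatable untied-task local the privates block stores
      // a pointer to the runtime-allocated storage, so the loop below
      // performs two loads and remembers both the pointer's address and the
      // storage itself; for an ordinary local a single load of the actual
      // memory suffices.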
      for (auto &Pair : UntiedLocalVars) {
4343         if (isAllocatableDecl(Pair.first)) {
4344           llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4345           Address Replacement(Ptr, CGF.getPointerAlign());
4346           Pair.getSecond().first = Replacement;
4347           Ptr = CGF.Builder.CreateLoad(Replacement);
4348           Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
4349           Pair.getSecond().second = Replacement;
4350         } else {
4351           llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4352           Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
4353           Pair.getSecond().first = Replacement;
4354         }
4355       }
4356     }
4357     if (Data.Reductions) {
4358       OMPPrivateScope FirstprivateScope(CGF);
4359       for (const auto &Pair : FirstprivatePtrs) {
4360         Address Replacement(CGF.Builder.CreateLoad(Pair.second),
4361                             CGF.getContext().getDeclAlign(Pair.first));
4362         FirstprivateScope.addPrivate(Pair.first,
4363                                      [Replacement]() { return Replacement; });
4364       }
4365       (void)FirstprivateScope.Privatize();
4366       OMPLexicalScope LexScope(CGF, S, CapturedRegion);
4367       ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
4368                              Data.ReductionCopies, Data.ReductionOps);
4369       llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
4370           CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
4371       for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
4372         RedCG.emitSharedOrigLValue(CGF, Cnt);
4373         RedCG.emitAggregateType(CGF, Cnt);
4374         // FIXME: This must be removed once the runtime library is fixed.
4375         // Emit required threadprivate variables for
4376         // initializer/combiner/finalizer.
4377         CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4378                                                            RedCG, Cnt);
4379         Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4380             CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4381         Replacement =
4382             Address(CGF.EmitScalarConversion(
4383                         Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4384                         CGF.getContext().getPointerType(
4385                             Data.ReductionCopies[Cnt]->getType()),
4386                         Data.ReductionCopies[Cnt]->getExprLoc()),
4387                     Replacement.getAlignment());
4388         Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4389         Scope.addPrivate(RedCG.getBaseDecl(Cnt),
4390                          [Replacement]() { return Replacement; });
4391       }
4392     }
4393     // Privatize all private variables except for in_reduction items.
4394     (void)Scope.Privatize();
4395     SmallVector<const Expr *, 4> InRedVars;
4396     SmallVector<const Expr *, 4> InRedPrivs;
4397     SmallVector<const Expr *, 4> InRedOps;
4398     SmallVector<const Expr *, 4> TaskgroupDescriptors;
4399     for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
4400       auto IPriv = C->privates().begin();
4401       auto IRed = C->reduction_ops().begin();
4402       auto ITD = C->taskgroup_descriptors().begin();
4403       for (const Expr *Ref : C->varlists()) {
4404         InRedVars.emplace_back(Ref);
4405         InRedPrivs.emplace_back(*IPriv);
4406         InRedOps.emplace_back(*IRed);
4407         TaskgroupDescriptors.emplace_back(*ITD);
4408         std::advance(IPriv, 1);
4409         std::advance(IRed, 1);
4410         std::advance(ITD, 1);
4411       }
4412     }
4413     // Privatize in_reduction items here, because taskgroup descriptors must be
4414     // privatized earlier.
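    // Illustrative example (not normative):
    //   #pragma omp taskgroup task_reduction(+: r)
    //   #pragma omp task in_reduction(+: r)
    //   r += compute();
    // The task locates its private copy of 'r' through the taskgroup's
    // reduction descriptor (getTaskReductionItem, which lowers to the
    // __kmpc_task_reduction_get_th_data runtime entry), so the descriptor
    // must already be privatized by the time the lookup below runs.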
4415     OMPPrivateScope InRedScope(CGF);
4416     if (!InRedVars.empty()) {
4417       ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
4418       for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
4419         RedCG.emitSharedOrigLValue(CGF, Cnt);
4420         RedCG.emitAggregateType(CGF, Cnt);
4421         // The taskgroup descriptor variable is always implicit firstprivate and
4422         // privatized already during processing of the firstprivates.
4423         // FIXME: This must be removed once the runtime library is fixed.
4424         // Emit required threadprivate variables for
4425         // initializer/combiner/finalizer.
4426         CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4427                                                            RedCG, Cnt);
4428         llvm::Value *ReductionsPtr;
4429         if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
4430           ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
4431                                                TRExpr->getExprLoc());
4432         } else {
4433           ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4434         }
4435         Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4436             CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4437         Replacement = Address(
4438             CGF.EmitScalarConversion(
4439                 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4440                 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
4441                 InRedPrivs[Cnt]->getExprLoc()),
4442             Replacement.getAlignment());
4443         Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4444         InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
4445                               [Replacement]() { return Replacement; });
4446       }
4447     }
4448     (void)InRedScope.Privatize();
4449
4450     CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
4451                                                              UntiedLocalVars);
4452     Action.Enter(CGF);
4453     BodyGen(CGF);
4454   };
4455   llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
4456       S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
4457       Data.NumberOfParts);
4458   OMPLexicalScope Scope(*this, S, llvm::None,
4459                         !isOpenMPParallelDirective(S.getDirectiveKind()) &&
4460                             !isOpenMPSimdDirective(S.getDirectiveKind()));
4461   TaskGen(*this, OutlinedFn, Data);
4462 }
4463
4464 static ImplicitParamDecl *
4465 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
4466                                   QualType Ty, CapturedDecl *CD,
4467                                   SourceLocation Loc) {
4468   auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4469                                            ImplicitParamDecl::Other);
4470   auto *OrigRef = DeclRefExpr::Create(
4471       C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
4472       /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4473   auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4474                                               ImplicitParamDecl::Other);
4475   auto *PrivateRef = DeclRefExpr::Create(
4476       C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
4477       /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4478   QualType ElemType = C.getBaseElementType(Ty);
4479   auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
4480                                            ImplicitParamDecl::Other);
4481   auto *InitRef = DeclRefExpr::Create(
4482       C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
4483       /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
4484   PrivateVD->setInitStyle(VarDecl::CInit);
4485   PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
4486                                               InitRef, /*BasePath=*/nullptr,
4487                                               VK_RValue, FPOptionsOverride()));
4488   Data.FirstprivateVars.emplace_back(OrigRef);
4489   Data.FirstprivateCopies.emplace_back(PrivateRef);
4490
Data.FirstprivateInits.emplace_back(InitRef); 4491 return OrigVD; 4492 } 4493 4494 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 4495 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 4496 OMPTargetDataInfo &InputInfo) { 4497 // Emit outlined function for task construct. 4498 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4499 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4500 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4501 auto I = CS->getCapturedDecl()->param_begin(); 4502 auto PartId = std::next(I); 4503 auto TaskT = std::next(I, 4); 4504 OMPTaskDataTy Data; 4505 // The task is not final. 4506 Data.Final.setInt(/*IntVal=*/false); 4507 // Get list of firstprivate variables. 4508 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4509 auto IRef = C->varlist_begin(); 4510 auto IElemInitRef = C->inits().begin(); 4511 for (auto *IInit : C->private_copies()) { 4512 Data.FirstprivateVars.push_back(*IRef); 4513 Data.FirstprivateCopies.push_back(IInit); 4514 Data.FirstprivateInits.push_back(*IElemInitRef); 4515 ++IRef; 4516 ++IElemInitRef; 4517 } 4518 } 4519 OMPPrivateScope TargetScope(*this); 4520 VarDecl *BPVD = nullptr; 4521 VarDecl *PVD = nullptr; 4522 VarDecl *SVD = nullptr; 4523 VarDecl *MVD = nullptr; 4524 if (InputInfo.NumberOfTargetItems > 0) { 4525 auto *CD = CapturedDecl::Create( 4526 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4527 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4528 QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType( 4529 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4530 /*IndexTypeQuals=*/0); 4531 BPVD = createImplicitFirstprivateForType( 4532 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4533 PVD = createImplicitFirstprivateForType( 4534 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4535 QualType SizesType = getContext().getConstantArrayType( 4536 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4537 ArrSize, nullptr, ArrayType::Normal, 4538 /*IndexTypeQuals=*/0); 4539 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4540 S.getBeginLoc()); 4541 TargetScope.addPrivate( 4542 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 4543 TargetScope.addPrivate(PVD, 4544 [&InputInfo]() { return InputInfo.PointersArray; }); 4545 TargetScope.addPrivate(SVD, 4546 [&InputInfo]() { return InputInfo.SizesArray; }); 4547 // If there is no user-defined mapper, the mapper array will be nullptr. In 4548 // this case, we don't need to privatize it. 4549 if (!dyn_cast_or_null<llvm::ConstantPointerNull>( 4550 InputInfo.MappersArray.getPointer())) { 4551 MVD = createImplicitFirstprivateForType( 4552 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4553 TargetScope.addPrivate(MVD, 4554 [&InputInfo]() { return InputInfo.MappersArray; }); 4555 } 4556 } 4557 (void)TargetScope.Privatize(); 4558 // Build list of dependences. 4559 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4560 OMPTaskDataTy::DependData &DD = 4561 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4562 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4563 } 4564 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD, 4565 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4566 // Set proper addresses for generated private copies. 
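    // For target task-based directives the offloading arrays (base pointers,
    // pointers, sizes and, when present, mappers) were registered above as
    // implicit firstprivate constant arrays, so they are recovered here via
    // the same copy-function mechanism that handles user-written
    // firstprivates.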
4567 OMPPrivateScope Scope(CGF); 4568 if (!Data.FirstprivateVars.empty()) { 4569 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4570 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4571 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4572 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4573 CS->getCapturedDecl()->getParam(PrivatesParam))); 4574 // Map privates. 4575 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4576 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4577 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4578 CallArgs.push_back(PrivatesPtr); 4579 ParamTypes.push_back(PrivatesPtr->getType()); 4580 for (const Expr *E : Data.FirstprivateVars) { 4581 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4582 Address PrivatePtr = 4583 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4584 ".firstpriv.ptr.addr"); 4585 PrivatePtrs.emplace_back(VD, PrivatePtr); 4586 CallArgs.push_back(PrivatePtr.getPointer()); 4587 ParamTypes.push_back(PrivatePtr.getType()); 4588 } 4589 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4590 ParamTypes, /*isVarArg=*/false); 4591 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4592 CopyFn, CopyFnTy->getPointerTo()); 4593 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4594 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4595 for (const auto &Pair : PrivatePtrs) { 4596 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4597 CGF.getContext().getDeclAlign(Pair.first)); 4598 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4599 } 4600 } 4601 // Privatize all private variables except for in_reduction items. 4602 (void)Scope.Privatize(); 4603 if (InputInfo.NumberOfTargetItems > 0) { 4604 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4605 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4606 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4607 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4608 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4609 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4610 // If MVD is nullptr, the mapper array is not privatized 4611 if (MVD) 4612 InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP( 4613 CGF.GetAddrOfLocalVar(MVD), /*Index=*/0); 4614 } 4615 4616 Action.Enter(CGF); 4617 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4618 BodyGen(CGF); 4619 }; 4620 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4621 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4622 Data.NumberOfParts); 4623 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4624 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4625 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4626 SourceLocation()); 4627 4628 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4629 SharedsTy, CapturedStruct, &IfCond, Data); 4630 } 4631 4632 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4633 // Emit outlined function for task construct. 
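  // Rough sketch of the resulting lowering (entry-point names are for
  // orientation only; the exact call sequence is chosen by emitTaskCall):
  //   #pragma omp task if(c) untied
  //   { body(); }
  // allocates the task object with __kmpc_omp_task_alloc and either hands it
  // to __kmpc_omp_task or, if the 'if' condition evaluates to false, runs it
  // immediately between __kmpc_omp_task_begin_if0 and
  // __kmpc_omp_task_complete_if0. The 'untied' clause clears Data.Tied below.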
4634 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4635 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4636 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4637 const Expr *IfCond = nullptr; 4638 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4639 if (C->getNameModifier() == OMPD_unknown || 4640 C->getNameModifier() == OMPD_task) { 4641 IfCond = C->getCondition(); 4642 break; 4643 } 4644 } 4645 4646 OMPTaskDataTy Data; 4647 // Check if we should emit tied or untied task. 4648 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4649 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4650 CGF.EmitStmt(CS->getCapturedStmt()); 4651 }; 4652 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4653 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4654 const OMPTaskDataTy &Data) { 4655 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4656 SharedsTy, CapturedStruct, IfCond, 4657 Data); 4658 }; 4659 auto LPCRegion = 4660 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4661 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4662 } 4663 4664 void CodeGenFunction::EmitOMPTaskyieldDirective( 4665 const OMPTaskyieldDirective &S) { 4666 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4667 } 4668 4669 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4670 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4671 } 4672 4673 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4674 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 4675 } 4676 4677 void CodeGenFunction::EmitOMPTaskgroupDirective( 4678 const OMPTaskgroupDirective &S) { 4679 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4680 Action.Enter(CGF); 4681 if (const Expr *E = S.getReductionRef()) { 4682 SmallVector<const Expr *, 4> LHSs; 4683 SmallVector<const Expr *, 4> RHSs; 4684 OMPTaskDataTy Data; 4685 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 4686 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4687 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4688 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4689 Data.ReductionOps.append(C->reduction_ops().begin(), 4690 C->reduction_ops().end()); 4691 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4692 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4693 } 4694 llvm::Value *ReductionDesc = 4695 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 4696 LHSs, RHSs, Data); 4697 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4698 CGF.EmitVarDecl(*VD); 4699 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 4700 /*Volatile=*/false, E->getType()); 4701 } 4702 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4703 }; 4704 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4705 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 4706 } 4707 4708 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 4709 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 4710 ? 
llvm::AtomicOrdering::NotAtomic 4711 : llvm::AtomicOrdering::AcquireRelease; 4712 CGM.getOpenMPRuntime().emitFlush( 4713 *this, 4714 [&S]() -> ArrayRef<const Expr *> { 4715 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 4716 return llvm::makeArrayRef(FlushClause->varlist_begin(), 4717 FlushClause->varlist_end()); 4718 return llvm::None; 4719 }(), 4720 S.getBeginLoc(), AO); 4721 } 4722 4723 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 4724 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 4725 LValue DOLVal = EmitLValue(DO->getDepobj()); 4726 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 4727 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 4728 DC->getModifier()); 4729 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 4730 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 4731 *this, Dependencies, DC->getBeginLoc()); 4732 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 4733 return; 4734 } 4735 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 4736 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 4737 return; 4738 } 4739 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 4740 CGM.getOpenMPRuntime().emitUpdateClause( 4741 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 4742 return; 4743 } 4744 } 4745 4746 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 4747 if (!OMPParentLoopDirectiveForScan) 4748 return; 4749 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 4750 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 4751 SmallVector<const Expr *, 4> Shareds; 4752 SmallVector<const Expr *, 4> Privates; 4753 SmallVector<const Expr *, 4> LHSs; 4754 SmallVector<const Expr *, 4> RHSs; 4755 SmallVector<const Expr *, 4> ReductionOps; 4756 SmallVector<const Expr *, 4> CopyOps; 4757 SmallVector<const Expr *, 4> CopyArrayTemps; 4758 SmallVector<const Expr *, 4> CopyArrayElems; 4759 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 4760 if (C->getModifier() != OMPC_REDUCTION_inscan) 4761 continue; 4762 Shareds.append(C->varlist_begin(), C->varlist_end()); 4763 Privates.append(C->privates().begin(), C->privates().end()); 4764 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4765 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4766 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 4767 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 4768 CopyArrayTemps.append(C->copy_array_temps().begin(), 4769 C->copy_array_temps().end()); 4770 CopyArrayElems.append(C->copy_array_elems().begin(), 4771 C->copy_array_elems().end()); 4772 } 4773 if (ParentDir.getDirectiveKind() == OMPD_simd || 4774 (getLangOpts().OpenMPSimd && 4775 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 4776 // For simd directive and simd-based directives in simd only mode, use the 4777 // following codegen: 4778 // int x = 0; 4779 // #pragma omp simd reduction(inscan, +: x) 4780 // for (..) { 4781 // <first part> 4782 // #pragma omp scan inclusive(x) 4783 // <second part> 4784 // } 4785 // is transformed to: 4786 // int x = 0; 4787 // for (..) { 4788 // int x_priv = 0; 4789 // <first part> 4790 // x = x_priv + x; 4791 // x_priv = x; 4792 // <second part> 4793 // } 4794 // and 4795 // int x = 0; 4796 // #pragma omp simd reduction(inscan, +: x) 4797 // for (..) 
{ 4798 // <first part> 4799 // #pragma omp scan exclusive(x) 4800 // <second part> 4801 // } 4802 // to 4803 // int x = 0; 4804 // for (..) { 4805 // int x_priv = 0; 4806 // <second part> 4807 // int temp = x; 4808 // x = x_priv + x; 4809 // x_priv = temp; 4810 // <first part> 4811 // } 4812 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 4813 EmitBranch(IsInclusive 4814 ? OMPScanReduce 4815 : BreakContinueStack.back().ContinueBlock.getBlock()); 4816 EmitBlock(OMPScanDispatch); 4817 { 4818 // New scope for correct construction/destruction of temp variables for 4819 // exclusive scan. 4820 LexicalScope Scope(*this, S.getSourceRange()); 4821 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 4822 EmitBlock(OMPScanReduce); 4823 if (!IsInclusive) { 4824 // Create temp var and copy LHS value to this temp value. 4825 // TMP = LHS; 4826 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4827 const Expr *PrivateExpr = Privates[I]; 4828 const Expr *TempExpr = CopyArrayTemps[I]; 4829 EmitAutoVarDecl( 4830 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 4831 LValue DestLVal = EmitLValue(TempExpr); 4832 LValue SrcLVal = EmitLValue(LHSs[I]); 4833 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4834 SrcLVal.getAddress(*this), 4835 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4836 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4837 CopyOps[I]); 4838 } 4839 } 4840 CGM.getOpenMPRuntime().emitReduction( 4841 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 4842 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 4843 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4844 const Expr *PrivateExpr = Privates[I]; 4845 LValue DestLVal; 4846 LValue SrcLVal; 4847 if (IsInclusive) { 4848 DestLVal = EmitLValue(RHSs[I]); 4849 SrcLVal = EmitLValue(LHSs[I]); 4850 } else { 4851 const Expr *TempExpr = CopyArrayTemps[I]; 4852 DestLVal = EmitLValue(RHSs[I]); 4853 SrcLVal = EmitLValue(TempExpr); 4854 } 4855 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4856 SrcLVal.getAddress(*this), 4857 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4858 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4859 CopyOps[I]); 4860 } 4861 } 4862 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 4863 OMPScanExitBlock = IsInclusive 4864 ? BreakContinueStack.back().ContinueBlock.getBlock() 4865 : OMPScanReduce; 4866 EmitBlock(OMPAfterScanBlock); 4867 return; 4868 } 4869 if (!IsInclusive) { 4870 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4871 EmitBlock(OMPScanExitBlock); 4872 } 4873 if (OMPFirstScanLoop) { 4874 // Emit buffer[i] = red; at the end of the input phase. 
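    // Illustrative example: for
    //   #pragma omp for reduction(inscan, +: x)
    // the directive is emitted as two loops. During the first (input) pass
    // the per-iteration value of the privatized 'x' is stored into the copy
    // array at index IV; the second (scan) pass, handled below under
    // !OMPFirstScanLoop, loads the already-scanned value back.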
4875 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4876 .getIterationVariable() 4877 ->IgnoreParenImpCasts(); 4878 LValue IdxLVal = EmitLValue(IVExpr); 4879 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4880 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4881 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4882 const Expr *PrivateExpr = Privates[I]; 4883 const Expr *OrigExpr = Shareds[I]; 4884 const Expr *CopyArrayElem = CopyArrayElems[I]; 4885 OpaqueValueMapping IdxMapping( 4886 *this, 4887 cast<OpaqueValueExpr>( 4888 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4889 RValue::get(IdxVal)); 4890 LValue DestLVal = EmitLValue(CopyArrayElem); 4891 LValue SrcLVal = EmitLValue(OrigExpr); 4892 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4893 SrcLVal.getAddress(*this), 4894 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4895 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4896 CopyOps[I]); 4897 } 4898 } 4899 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4900 if (IsInclusive) { 4901 EmitBlock(OMPScanExitBlock); 4902 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4903 } 4904 EmitBlock(OMPScanDispatch); 4905 if (!OMPFirstScanLoop) { 4906 // Emit red = buffer[i]; at the entrance to the scan phase. 4907 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4908 .getIterationVariable() 4909 ->IgnoreParenImpCasts(); 4910 LValue IdxLVal = EmitLValue(IVExpr); 4911 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4912 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4913 llvm::BasicBlock *ExclusiveExitBB = nullptr; 4914 if (!IsInclusive) { 4915 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 4916 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 4917 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 4918 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 4919 EmitBlock(ContBB); 4920 // Use idx - 1 iteration for exclusive scan. 4921 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 4922 } 4923 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4924 const Expr *PrivateExpr = Privates[I]; 4925 const Expr *OrigExpr = Shareds[I]; 4926 const Expr *CopyArrayElem = CopyArrayElems[I]; 4927 OpaqueValueMapping IdxMapping( 4928 *this, 4929 cast<OpaqueValueExpr>( 4930 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4931 RValue::get(IdxVal)); 4932 LValue SrcLVal = EmitLValue(CopyArrayElem); 4933 LValue DestLVal = EmitLValue(OrigExpr); 4934 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4935 SrcLVal.getAddress(*this), 4936 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4937 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4938 CopyOps[I]); 4939 } 4940 if (!IsInclusive) { 4941 EmitBlock(ExclusiveExitBB); 4942 } 4943 } 4944 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 4945 : OMPAfterScanBlock); 4946 EmitBlock(OMPAfterScanBlock); 4947 } 4948 4949 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 4950 const CodeGenLoopTy &CodeGenLoop, 4951 Expr *IncExpr) { 4952 // Emit the loop iteration variable. 4953 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 4954 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 4955 EmitVarDecl(*IVDecl); 4956 4957 // Emit the iterations count variable. 
4958 // If it is not a variable, Sema decided to calculate iterations count on each 4959 // iteration (e.g., it is foldable into a constant). 4960 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 4961 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 4962 // Emit calculation of the iterations count. 4963 EmitIgnoredExpr(S.getCalcLastIteration()); 4964 } 4965 4966 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 4967 4968 bool HasLastprivateClause = false; 4969 // Check pre-condition. 4970 { 4971 OMPLoopScope PreInitScope(*this, S); 4972 // Skip the entire loop if we don't meet the precondition. 4973 // If the condition constant folds and can be elided, avoid emitting the 4974 // whole loop. 4975 bool CondConstant; 4976 llvm::BasicBlock *ContBlock = nullptr; 4977 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 4978 if (!CondConstant) 4979 return; 4980 } else { 4981 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 4982 ContBlock = createBasicBlock("omp.precond.end"); 4983 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 4984 getProfileCount(&S)); 4985 EmitBlock(ThenBlock); 4986 incrementProfileCounter(&S); 4987 } 4988 4989 emitAlignedClause(*this, S); 4990 // Emit 'then' code. 4991 { 4992 // Emit helper vars inits. 4993 4994 LValue LB = EmitOMPHelperVar( 4995 *this, cast<DeclRefExpr>( 4996 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4997 ? S.getCombinedLowerBoundVariable() 4998 : S.getLowerBoundVariable()))); 4999 LValue UB = EmitOMPHelperVar( 5000 *this, cast<DeclRefExpr>( 5001 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5002 ? S.getCombinedUpperBoundVariable() 5003 : S.getUpperBoundVariable()))); 5004 LValue ST = 5005 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 5006 LValue IL = 5007 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 5008 5009 OMPPrivateScope LoopScope(*this); 5010 if (EmitOMPFirstprivateClause(S, LoopScope)) { 5011 // Emit implicit barrier to synchronize threads and avoid data races 5012 // on initialization of firstprivate variables and post-update of 5013 // lastprivate variables. 5014 CGM.getOpenMPRuntime().emitBarrierCall( 5015 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 5016 /*ForceSimpleCall=*/true); 5017 } 5018 EmitOMPPrivateClause(S, LoopScope); 5019 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5020 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5021 !isOpenMPTeamsDirective(S.getDirectiveKind())) 5022 EmitOMPReductionClauseInit(S, LoopScope); 5023 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 5024 EmitOMPPrivateLoopCounters(S, LoopScope); 5025 (void)LoopScope.Privatize(); 5026 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5027 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 5028 5029 // Detect the distribute schedule kind and chunk. 5030 llvm::Value *Chunk = nullptr; 5031 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 5032 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 5033 ScheduleKind = C->getDistScheduleKind(); 5034 if (const Expr *Ch = C->getChunkSize()) { 5035 Chunk = EmitScalarExpr(Ch); 5036 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 5037 S.getIterationVariable()->getType(), 5038 S.getBeginLoc()); 5039 } 5040 } else { 5041 // Default behaviour for dist_schedule clause. 
5042 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 5043 *this, S, ScheduleKind, Chunk); 5044 } 5045 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 5046 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 5047 5048 // OpenMP [2.10.8, distribute Construct, Description] 5049 // If dist_schedule is specified, kind must be static. If specified, 5050 // iterations are divided into chunks of size chunk_size, chunks are 5051 // assigned to the teams of the league in a round-robin fashion in the 5052 // order of the team number. When no chunk_size is specified, the 5053 // iteration space is divided into chunks that are approximately equal 5054 // in size, and at most one chunk is distributed to each team of the 5055 // league. The size of the chunks is unspecified in this case. 5056 bool StaticChunked = RT.isStaticChunked( 5057 ScheduleKind, /* Chunked */ Chunk != nullptr) && 5058 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 5059 if (RT.isStaticNonchunked(ScheduleKind, 5060 /* Chunked */ Chunk != nullptr) || 5061 StaticChunked) { 5062 CGOpenMPRuntime::StaticRTInput StaticInit( 5063 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 5064 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5065 StaticChunked ? Chunk : nullptr); 5066 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 5067 StaticInit); 5068 JumpDest LoopExit = 5069 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 5070 // UB = min(UB, GlobalUB); 5071 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5072 ? S.getCombinedEnsureUpperBound() 5073 : S.getEnsureUpperBound()); 5074 // IV = LB; 5075 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5076 ? S.getCombinedInit() 5077 : S.getInit()); 5078 5079 const Expr *Cond = 5080 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5081 ? S.getCombinedCond() 5082 : S.getCond(); 5083 5084 if (StaticChunked) 5085 Cond = S.getCombinedDistCond(); 5086 5087 // For static unchunked schedules generate: 5088 // 5089 // 1. For distribute alone, codegen 5090 // while (idx <= UB) { 5091 // BODY; 5092 // ++idx; 5093 // } 5094 // 5095 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 5096 // while (idx <= UB) { 5097 // <CodeGen rest of pragma>(LB, UB); 5098 // idx += ST; 5099 // } 5100 // 5101 // For static chunk one schedule generate: 5102 // 5103 // while (IV <= GlobalUB) { 5104 // <CodeGen rest of pragma>(LB, UB); 5105 // LB += ST; 5106 // UB += ST; 5107 // UB = min(UB, GlobalUB); 5108 // IV = LB; 5109 // } 5110 // 5111 emitCommonSimdLoop( 5112 *this, S, 5113 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5114 if (isOpenMPSimdDirective(S.getDirectiveKind())) 5115 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 5116 }, 5117 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 5118 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 5119 CGF.EmitOMPInnerLoop( 5120 S, LoopScope.requiresCleanups(), Cond, IncExpr, 5121 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 5122 CodeGenLoop(CGF, S, LoopExit); 5123 }, 5124 [&S, StaticChunked](CodeGenFunction &CGF) { 5125 if (StaticChunked) { 5126 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 5127 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 5128 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 5129 CGF.EmitIgnoredExpr(S.getCombinedInit()); 5130 } 5131 }); 5132 }); 5133 EmitBlock(LoopExit.getBlock()); 5134 // Tell the runtime we are done. 
5135 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 5136 } else { 5137 // Emit the outer loop, which requests its work chunk [LB..UB] from 5138 // runtime and runs the inner loop to process it. 5139 const OMPLoopArguments LoopArguments = { 5140 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5141 IL.getAddress(*this), Chunk}; 5142 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 5143 CodeGenLoop); 5144 } 5145 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 5146 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 5147 return CGF.Builder.CreateIsNotNull( 5148 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5149 }); 5150 } 5151 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5152 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5153 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 5154 EmitOMPReductionClauseFinal(S, OMPD_simd); 5155 // Emit post-update of the reduction variables if IsLastIter != 0. 5156 emitPostUpdateForReductionClause( 5157 *this, S, [IL, &S](CodeGenFunction &CGF) { 5158 return CGF.Builder.CreateIsNotNull( 5159 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5160 }); 5161 } 5162 // Emit final copy of the lastprivate variables if IsLastIter != 0. 5163 if (HasLastprivateClause) { 5164 EmitOMPLastprivateClauseFinal( 5165 S, /*NoFinals=*/false, 5166 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 5167 } 5168 } 5169 5170 // We're now done with the loop, so jump to the continuation block. 5171 if (ContBlock) { 5172 EmitBranch(ContBlock); 5173 EmitBlock(ContBlock, true); 5174 } 5175 } 5176 } 5177 5178 void CodeGenFunction::EmitOMPDistributeDirective( 5179 const OMPDistributeDirective &S) { 5180 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5181 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5182 }; 5183 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5184 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 5185 } 5186 5187 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 5188 const CapturedStmt *S, 5189 SourceLocation Loc) { 5190 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 5191 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 5192 CGF.CapturedStmtInfo = &CapStmtInfo; 5193 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 5194 Fn->setDoesNotRecurse(); 5195 return Fn; 5196 } 5197 5198 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 5199 if (S.hasClausesOfKind<OMPDependClause>()) { 5200 assert(!S.hasAssociatedStmt() && 5201 "No associated statement must be in ordered depend construct."); 5202 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 5203 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 5204 return; 5205 } 5206 const auto *C = S.getSingleClause<OMPSIMDClause>(); 5207 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 5208 PrePostActionTy &Action) { 5209 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 5210 if (C) { 5211 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5212 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5213 llvm::Function *OutlinedFn = 5214 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 5215 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 5216 OutlinedFn, CapturedVars); 5217 } else { 5218 Action.Enter(CGF); 5219 CGF.EmitStmt(CS->getCapturedStmt()); 5220 } 5221 }; 5222 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5223 CGM.getOpenMPRuntime().emitOrderedRegion(*this, 
CodeGen, S.getBeginLoc(), !C); 5224 } 5225 5226 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 5227 QualType SrcType, QualType DestType, 5228 SourceLocation Loc) { 5229 assert(CGF.hasScalarEvaluationKind(DestType) && 5230 "DestType must have scalar evaluation kind."); 5231 assert(!Val.isAggregate() && "Must be a scalar or complex."); 5232 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 5233 DestType, Loc) 5234 : CGF.EmitComplexToScalarConversion( 5235 Val.getComplexVal(), SrcType, DestType, Loc); 5236 } 5237 5238 static CodeGenFunction::ComplexPairTy 5239 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 5240 QualType DestType, SourceLocation Loc) { 5241 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 5242 "DestType must have complex evaluation kind."); 5243 CodeGenFunction::ComplexPairTy ComplexVal; 5244 if (Val.isScalar()) { 5245 // Convert the input element to the element type of the complex. 5246 QualType DestElementType = 5247 DestType->castAs<ComplexType>()->getElementType(); 5248 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 5249 Val.getScalarVal(), SrcType, DestElementType, Loc); 5250 ComplexVal = CodeGenFunction::ComplexPairTy( 5251 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 5252 } else { 5253 assert(Val.isComplex() && "Must be a scalar or complex."); 5254 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 5255 QualType DestElementType = 5256 DestType->castAs<ComplexType>()->getElementType(); 5257 ComplexVal.first = CGF.EmitScalarConversion( 5258 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 5259 ComplexVal.second = CGF.EmitScalarConversion( 5260 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 5261 } 5262 return ComplexVal; 5263 } 5264 5265 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5266 LValue LVal, RValue RVal) { 5267 if (LVal.isGlobalReg()) 5268 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 5269 else 5270 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 5271 } 5272 5273 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 5274 llvm::AtomicOrdering AO, LValue LVal, 5275 SourceLocation Loc) { 5276 if (LVal.isGlobalReg()) 5277 return CGF.EmitLoadOfLValue(LVal, Loc); 5278 return CGF.EmitAtomicLoad( 5279 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 5280 LVal.isVolatile()); 5281 } 5282 5283 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 5284 QualType RValTy, SourceLocation Loc) { 5285 switch (getEvaluationKind(LVal.getType())) { 5286 case TEK_Scalar: 5287 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 5288 *this, RVal, RValTy, LVal.getType(), Loc)), 5289 LVal); 5290 break; 5291 case TEK_Complex: 5292 EmitStoreOfComplex( 5293 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 5294 /*isInit=*/false); 5295 break; 5296 case TEK_Aggregate: 5297 llvm_unreachable("Must be a scalar or complex."); 5298 } 5299 } 5300 5301 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5302 const Expr *X, const Expr *V, 5303 SourceLocation Loc) { 5304 // v = x; 5305 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 5306 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 5307 LValue XLValue = CGF.EmitLValue(X); 5308 LValue VLValue = CGF.EmitLValue(V); 5309 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 5310 // OpenMP, 
2.17.7, atomic Construct 5311 // If the read or capture clause is specified and the acquire, acq_rel, or 5312 // seq_cst clause is specified then the strong flush on exit from the atomic 5313 // operation is also an acquire flush. 5314 switch (AO) { 5315 case llvm::AtomicOrdering::Acquire: 5316 case llvm::AtomicOrdering::AcquireRelease: 5317 case llvm::AtomicOrdering::SequentiallyConsistent: 5318 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5319 llvm::AtomicOrdering::Acquire); 5320 break; 5321 case llvm::AtomicOrdering::Monotonic: 5322 case llvm::AtomicOrdering::Release: 5323 break; 5324 case llvm::AtomicOrdering::NotAtomic: 5325 case llvm::AtomicOrdering::Unordered: 5326 llvm_unreachable("Unexpected ordering."); 5327 } 5328 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 5329 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5330 } 5331 5332 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, 5333 llvm::AtomicOrdering AO, const Expr *X, 5334 const Expr *E, SourceLocation Loc) { 5335 // x = expr; 5336 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 5337 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 5338 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5339 // OpenMP, 2.17.7, atomic Construct 5340 // If the write, update, or capture clause is specified and the release, 5341 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5342 // the atomic operation is also a release flush. 5343 switch (AO) { 5344 case llvm::AtomicOrdering::Release: 5345 case llvm::AtomicOrdering::AcquireRelease: 5346 case llvm::AtomicOrdering::SequentiallyConsistent: 5347 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5348 llvm::AtomicOrdering::Release); 5349 break; 5350 case llvm::AtomicOrdering::Acquire: 5351 case llvm::AtomicOrdering::Monotonic: 5352 break; 5353 case llvm::AtomicOrdering::NotAtomic: 5354 case llvm::AtomicOrdering::Unordered: 5355 llvm_unreachable("Unexpected ordering."); 5356 } 5357 } 5358 5359 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 5360 RValue Update, 5361 BinaryOperatorKind BO, 5362 llvm::AtomicOrdering AO, 5363 bool IsXLHSInRHSPart) { 5364 ASTContext &Context = CGF.getContext(); 5365 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 5366 // expression is simple and atomic is allowed for the given type for the 5367 // target platform. 
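  // For example (illustrative): with 'int x', '#pragma omp atomic update' on
  // 'x += 2;' passes the checks below and becomes a single 'atomicrmw add'
  // instruction, while the same update on a 'double' fails the integer check
  // and falls back to the compare-and-swap loop in
  // EmitOMPAtomicSimpleUpdateExpr.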
5368   if (BO == BO_Comma || !Update.isScalar() ||
5369       !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
5370       (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
5371        (Update.getScalarVal()->getType() !=
5372         X.getAddress(CGF).getElementType())) ||
5373       !X.getAddress(CGF).getElementType()->isIntegerTy() ||
5374       !Context.getTargetInfo().hasBuiltinAtomic(
5375           Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
5376     return std::make_pair(false, RValue::get(nullptr));
5377
5378   llvm::AtomicRMWInst::BinOp RMWOp;
5379   switch (BO) {
5380   case BO_Add:
5381     RMWOp = llvm::AtomicRMWInst::Add;
5382     break;
5383   case BO_Sub:
5384     if (!IsXLHSInRHSPart)
5385       return std::make_pair(false, RValue::get(nullptr));
5386     RMWOp = llvm::AtomicRMWInst::Sub;
5387     break;
5388   case BO_And:
5389     RMWOp = llvm::AtomicRMWInst::And;
5390     break;
5391   case BO_Or:
5392     RMWOp = llvm::AtomicRMWInst::Or;
5393     break;
5394   case BO_Xor:
5395     RMWOp = llvm::AtomicRMWInst::Xor;
5396     break;
5397   case BO_LT:
5398     RMWOp = X.getType()->hasSignedIntegerRepresentation()
5399                 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
5400                                    : llvm::AtomicRMWInst::Max)
5401                 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
5402                                    : llvm::AtomicRMWInst::UMax);
5403     break;
5404   case BO_GT:
5405     RMWOp = X.getType()->hasSignedIntegerRepresentation()
5406                 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
5407                                    : llvm::AtomicRMWInst::Min)
5408                 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
5409                                    : llvm::AtomicRMWInst::UMin);
5410     break;
5411   case BO_Assign:
5412     RMWOp = llvm::AtomicRMWInst::Xchg;
5413     break;
5414   case BO_Mul:
5415   case BO_Div:
5416   case BO_Rem:
5417   case BO_Shl:
5418   case BO_Shr:
5419   case BO_LAnd:
5420   case BO_LOr:
5421     return std::make_pair(false, RValue::get(nullptr));
5422   case BO_PtrMemD:
5423   case BO_PtrMemI:
5424   case BO_LE:
5425   case BO_GE:
5426   case BO_EQ:
5427   case BO_NE:
5428   case BO_Cmp:
5429   case BO_AddAssign:
5430   case BO_SubAssign:
5431   case BO_AndAssign:
5432   case BO_OrAssign:
5433   case BO_XorAssign:
5434   case BO_MulAssign:
5435   case BO_DivAssign:
5436   case BO_RemAssign:
5437   case BO_ShlAssign:
5438   case BO_ShrAssign:
5439   case BO_Comma:
5440     llvm_unreachable("Unsupported atomic update operation");
5441   }
5442   llvm::Value *UpdateVal = Update.getScalarVal();
5443   if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
5444     UpdateVal = CGF.Builder.CreateIntCast(
5445         IC, X.getAddress(CGF).getElementType(),
5446         X.getType()->hasSignedIntegerRepresentation());
5447   }
5448   llvm::Value *Res =
5449       CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
5450   return std::make_pair(true, RValue::get(Res));
5451 }
5452
5453 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
5454     LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
5455     llvm::AtomicOrdering AO, SourceLocation Loc,
5456     const llvm::function_ref<RValue(RValue)> CommonGen) {
5457   // Update expressions are allowed to have the following forms:
5458   // x binop= expr; -> xrval binop expr;
5459   // x++, ++x -> xrval + 1;
5460   // x--, --x -> xrval - 1;
5461   // x = x binop expr; -> xrval binop expr
5462   // x = expr Op x; -> expr binop xrval;
5463   auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
5464   if (!Res.first) {
5465     if (X.isGlobalReg()) {
5466       // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
5467       // 'xrval'.
5468       EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
5469     } else {
5470       // Perform compare-and-swap procedure.
5471       EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
5472     }
5473   }
5474   return Res;
5475 }
5476
5477 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
5478                                     llvm::AtomicOrdering AO, const Expr *X,
5479                                     const Expr *E, const Expr *UE,
5480                                     bool IsXLHSInRHSPart, SourceLocation Loc) {
5481   assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
5482          "Update expr in 'atomic update' must be a binary operator.");
5483   const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
5484   // Update expressions are allowed to have the following forms:
5485   // x binop= expr; -> xrval binop expr;
5486   // x++, ++x -> xrval + 1;
5487   // x--, --x -> xrval - 1;
5488   // x = x binop expr; -> xrval binop expr
5489   // x = expr Op x; -> expr binop xrval;
5490   assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
5491   LValue XLValue = CGF.EmitLValue(X);
5492   RValue ExprRValue = CGF.EmitAnyExpr(E);
5493   const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
5494   const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
5495   const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
5496   const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
5497   auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
5498     CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
5499     CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
5500     return CGF.EmitAnyExpr(UE);
5501   };
5502   (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
5503       XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
5504   CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
5505   // OpenMP, 2.17.7, atomic Construct
5506   // If the write, update, or capture clause is specified and the release,
5507   // acq_rel, or seq_cst clause is specified then the strong flush on entry to
5508   // the atomic operation is also a release flush.
5509   switch (AO) {
5510   case llvm::AtomicOrdering::Release:
5511   case llvm::AtomicOrdering::AcquireRelease:
5512   case llvm::AtomicOrdering::SequentiallyConsistent:
5513     CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
5514                                          llvm::AtomicOrdering::Release);
5515     break;
5516   case llvm::AtomicOrdering::Acquire:
5517   case llvm::AtomicOrdering::Monotonic:
5518     break;
5519   case llvm::AtomicOrdering::NotAtomic:
5520   case llvm::AtomicOrdering::Unordered:
5521     llvm_unreachable("Unexpected ordering.");
5522   }
5523 }
5524
5525 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
5526                             QualType SourceType, QualType ResType,
5527                             SourceLocation Loc) {
5528   switch (CGF.getEvaluationKind(ResType)) {
5529   case TEK_Scalar:
5530     return RValue::get(
5531         convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
5532   case TEK_Complex: {
5533     auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
5534     return RValue::getComplex(Res.first, Res.second);
5535   }
5536   case TEK_Aggregate:
5537     break;
5538   }
5539   llvm_unreachable("Must be a scalar or complex.");
5540 }
5541
5542 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
5543                                      llvm::AtomicOrdering AO,
5544                                      bool IsPostfixUpdate, const Expr *V,
5545                                      const Expr *X, const Expr *E,
5546                                      const Expr *UE, bool IsXLHSInRHSPart,
5547                                      SourceLocation Loc) {
5548   assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
5549   assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
5550   RValue NewVVal;
5551   LValue VLValue = CGF.EmitLValue(V);
5552   LValue XLValue = CGF.EmitLValue(X);
5553   RValue ExprRValue = CGF.EmitAnyExpr(E);
5554   QualType NewVValType;
5555   if (UE) {
5556     // 'x' is updated with some additional value.
5557     assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
5558            "Update expr in 'atomic capture' must be a binary operator.");
5559     const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
5560     // Update expressions are allowed to have the following forms:
5561     // x binop= expr; -> xrval binop expr;
5562     // x++, ++x -> xrval + 1;
5563     // x--, --x -> xrval - 1;
5564     // x = x binop expr; -> xrval binop expr
5565     // x = expr Op x; -> expr binop xrval;
5566     const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
5567     const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
5568     const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
5569     NewVValType = XRValExpr->getType();
5570     const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
5571     auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
5572                   IsPostfixUpdate](RValue XRValue) {
5573       CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
5574       CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
5575       RValue Res = CGF.EmitAnyExpr(UE);
5576       NewVVal = IsPostfixUpdate ? XRValue : Res;
5577       return Res;
5578     };
5579     auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
5580         XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
5581     CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
5582     if (Res.first) {
5583       // 'atomicrmw' instruction was generated.
5584       if (IsPostfixUpdate) {
5585         // Use old value from 'atomicrmw'.
5586         NewVVal = Res.second;
5587       } else {
5588         // 'atomicrmw' does not provide new value, so evaluate it using old
5589         // value of 'x'.
5590 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5591 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5592 NewVVal = CGF.EmitAnyExpr(UE); 5593 } 5594 } 5595 } else { 5596 // 'x' is simply rewritten with some 'expr'. 5597 NewVValType = X->getType().getNonReferenceType(); 5598 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5599 X->getType().getNonReferenceType(), Loc); 5600 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5601 NewVVal = XRValue; 5602 return ExprRValue; 5603 }; 5604 // Try to perform atomicrmw xchg, otherwise simple exchange. 5605 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5606 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5607 Loc, Gen); 5608 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5609 if (Res.first) { 5610 // 'atomicrmw' instruction was generated. 5611 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 5612 } 5613 } 5614 // Emit post-update store to 'v' of old/new 'x' value. 5615 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 5616 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5617 // OpenMP, 2.17.7, atomic Construct 5618 // If the write, update, or capture clause is specified and the release, 5619 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5620 // the atomic operation is also a release flush. 5621 // If the read or capture clause is specified and the acquire, acq_rel, or 5622 // seq_cst clause is specified then the strong flush on exit from the atomic 5623 // operation is also an acquire flush. 5624 switch (AO) { 5625 case llvm::AtomicOrdering::Release: 5626 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5627 llvm::AtomicOrdering::Release); 5628 break; 5629 case llvm::AtomicOrdering::Acquire: 5630 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5631 llvm::AtomicOrdering::Acquire); 5632 break; 5633 case llvm::AtomicOrdering::AcquireRelease: 5634 case llvm::AtomicOrdering::SequentiallyConsistent: 5635 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5636 llvm::AtomicOrdering::AcquireRelease); 5637 break; 5638 case llvm::AtomicOrdering::Monotonic: 5639 break; 5640 case llvm::AtomicOrdering::NotAtomic: 5641 case llvm::AtomicOrdering::Unordered: 5642 llvm_unreachable("Unexpected ordering."); 5643 } 5644 } 5645 5646 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 5647 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 5648 const Expr *X, const Expr *V, const Expr *E, 5649 const Expr *UE, bool IsXLHSInRHSPart, 5650 SourceLocation Loc) { 5651 switch (Kind) { 5652 case OMPC_read: 5653 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 5654 break; 5655 case OMPC_write: 5656 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 5657 break; 5658 case OMPC_unknown: 5659 case OMPC_update: 5660 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 5661 break; 5662 case OMPC_capture: 5663 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 5664 IsXLHSInRHSPart, Loc); 5665 break; 5666 case OMPC_if: 5667 case OMPC_final: 5668 case OMPC_num_threads: 5669 case OMPC_private: 5670 case OMPC_firstprivate: 5671 case OMPC_lastprivate: 5672 case OMPC_reduction: 5673 case OMPC_task_reduction: 5674 case OMPC_in_reduction: 5675 case OMPC_safelen: 5676 case OMPC_simdlen: 5677 case OMPC_sizes: 5678 case OMPC_allocator: 5679 case OMPC_allocate: 5680 case OMPC_collapse: 5681 case OMPC_default: 5682 case OMPC_seq_cst: 5683 case OMPC_acq_rel: 5684 
static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              llvm::AtomicOrdering AO, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
    break;
  case OMPC_write:
    emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_task_reduction:
  case OMPC_in_reduction:
  case OMPC_safelen:
  case OMPC_simdlen:
  case OMPC_sizes:
  case OMPC_allocator:
  case OMPC_allocate:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_acq_rel:
  case OMPC_acquire:
  case OMPC_release:
  case OMPC_relaxed:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_depobj:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
  case OMPC_threads:
  case OMPC_simd:
  case OMPC_map:
  case OMPC_num_teams:
  case OMPC_thread_limit:
  case OMPC_priority:
  case OMPC_grainsize:
  case OMPC_nogroup:
  case OMPC_num_tasks:
  case OMPC_hint:
  case OMPC_dist_schedule:
  case OMPC_defaultmap:
  case OMPC_uniform:
  case OMPC_to:
  case OMPC_from:
  case OMPC_use_device_ptr:
  case OMPC_use_device_addr:
  case OMPC_is_device_ptr:
  case OMPC_unified_address:
  case OMPC_unified_shared_memory:
  case OMPC_reverse_offload:
  case OMPC_dynamic_allocators:
  case OMPC_atomic_default_mem_order:
  case OMPC_device_type:
  case OMPC_match:
  case OMPC_nontemporal:
  case OMPC_order:
  case OMPC_destroy:
  case OMPC_detach:
  case OMPC_inclusive:
  case OMPC_exclusive:
  case OMPC_uses_allocators:
  case OMPC_affinity:
  case OMPC_init:
  case OMPC_inbranch:
  case OMPC_notinbranch:
  case OMPC_link:
  case OMPC_use:
  case OMPC_novariants:
  case OMPC_nocontext:
  case OMPC_filter:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
  bool MemOrderingSpecified = false;
  if (S.getSingleClause<OMPSeqCstClause>()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcqRelClause>()) {
    AO = llvm::AtomicOrdering::AcquireRelease;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcquireClause>()) {
    AO = llvm::AtomicOrdering::Acquire;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPReleaseClause>()) {
    AO = llvm::AtomicOrdering::Release;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPRelaxedClause>()) {
    AO = llvm::AtomicOrdering::Monotonic;
    MemOrderingSpecified = true;
  }
  OpenMPClauseKind Kind = OMPC_unknown;
  for (const OMPClause *C : S.clauses()) {
    // Find the first clause that names the atomic operation (skip the
    // seq_cst, acq_rel, acquire, release, relaxed, and hint clauses if they
    // appear first).
    if (C->getClauseKind() != OMPC_seq_cst &&
        C->getClauseKind() != OMPC_acq_rel &&
        C->getClauseKind() != OMPC_acquire &&
        C->getClauseKind() != OMPC_release &&
        C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) {
      Kind = C->getClauseKind();
      break;
    }
  }
  if (!MemOrderingSpecified) {
    llvm::AtomicOrdering DefaultOrder =
        CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
    if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
        DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
        (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
         Kind == OMPC_capture)) {
      AO = DefaultOrder;
    } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
      if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
        AO = llvm::AtomicOrdering::Release;
      } else if (Kind == OMPC_read) {
        assert(Kind == OMPC_read && "Unexpected atomic kind.");
        AO = llvm::AtomicOrdering::Acquire;
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(S.getAssociatedStmt());
  emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
                    S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
                    S.getBeginLoc());
}

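// For illustration, with a prior
//
//   #pragma omp requires atomic_default_mem_order(acq_rel)
//
// in effect and no explicit memory-order clause on the atomic itself, e.g.
//
//   #pragma omp atomic update
//   x += 1;
//
// the logic above selects Release for write/update forms and Acquire for
// reads, since acq_rel as such is only meaningful for the capture form.
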
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen) {
  assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
  CodeGenModule &CGM = CGF.CGM;

  // On device emit this construct as inlined code.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    OMPLexicalScope Scope(CGF, S, OMPD_target);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
        });
    return;
  }

  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  const Expr *IfCond = nullptr;
  // Check for the at most one if clause associated with the target region.
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_target) {
      IfCond = C->getCondition();
      break;
    }
  }

  // Check if we have any device clause associated with the directive.
  llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
      nullptr, OMPC_DEVICE_unknown);
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device.setPointerAndInt(C->getDevice(), C->getModifier());

  // Check if we have an if clause whose conditional always evaluates to false
  // or if we do not have any targets specified. If so the target region is not
  // an offload entry point.
  bool IsOffloadEntry = true;
  if (IfCond) {
    bool Val;
    if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
      IsOffloadEntry = false;
  }
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    IsOffloadEntry = false;

  assert(CGF.CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  // In case we have Ctors/Dtors we use the complete type variant to produce
  // the mangling of the device outlined kernel.
  if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
  else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
  else
    ParentName =
        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));

  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
                                                    IsOffloadEntry, CodeGen);
  OMPLexicalScope Scope(CGF, S, OMPD_task);
  auto &&SizeEmitter =
      [IsOffloadEntry](CodeGenFunction &CGF,
                       const OMPLoopDirective &D) -> llvm::Value * {
    if (IsOffloadEntry) {
      OMPLoopScope(CGF, D);
      // Emit calculation of the iterations count.
      llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
      NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
                                                /*isSigned=*/false);
      return NumIterations;
    }
    return nullptr;
  };
  CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
                                        SizeEmitter);
}

static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
                             PrePostActionTy &Action) {
  Action.Enter(CGF);
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
  CGF.EmitOMPPrivateClause(S, PrivateScope);
  (void)PrivateScope.Privatize();
  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
    CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

  CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
  CGF.EnsureInsertPoint();
}

void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
                                                  StringRef ParentName,
                                                  const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

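// For illustration, on the host a region such as
//
//   #pragma omp target map(tofrom: a)
//   a += 1;
//
// is emitted as an outlined offload-entry function plus a call into the
// offloading runtime (one of the __tgt_target* entry points) guarded by any
// 'if' clause; when offloading is disabled or fails at run time, the runtime
// falls back to executing the outlined host version of the region.
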
static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        OpenMPDirectiveKind InnermostKind,
                                        const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);

  const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
  const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
  if (NT || TL) {
    const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
    const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;

    CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
                                                  S.getBeginLoc());
  }

  OMPTeamsScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
                                           CapturedVars);
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

static void emitTargetTeamsRegion(CodeGenFunction &CGF,
                                  PrePostActionTy &Action,
                                  const OMPTargetTeamsDirective &S) {
  auto *CS = S.getCapturedStmt(OMPD_teams);
  Action.Enter(CGF);
  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDirective(
    const OMPTargetTeamsDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

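// For illustration, a construct such as
//
//   #pragma omp teams num_teams(4) thread_limit(8)
//
// first registers the clause values with the runtime via emitNumTeamsClause
// (__kmpc_push_num_teams) and then forks the outlined teams function through
// the runtime's teams entry point (__kmpc_fork_teams), forwarding the
// captured variables as arguments.
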
static void
emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
                                const OMPTargetTeamsDistributeDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
    const OMPTargetTeamsDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetTeamsDistributeSimdRegion(
    CodeGenFunction &CGF, PrePostActionTy &Action,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
    const OMPTargetTeamsDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPTeamsDistributeDirective(
    const OMPTeamsDistributeDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
    const OMPTeamsDistributeSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

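// A note on the combined 'teams distribute' emission above: only the teams
// region is outlined; the distribute (and simd) part is emitted inline inside
// it through emitInlinedDirective. For example,
//
//   #pragma omp teams distribute simd
//   for (int i = 0; i < N; ++i)
//     a[i] = b[i];
//
// produces a single outlined teams function whose body runs the distribute
// loop with simd annotations, rather than two levels of outlining.
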
void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
    const OMPTeamsDistributeParallelForDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
                                                    CodeGenDistribute);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
    const OMPTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                            PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };
  emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd,
                              CodeGen);
  emitPostUpdateForReductionClause(*this, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

static void emitTargetTeamsDistributeParallelForRegion(
    CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
    PrePostActionTy &Action) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                                 PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };

  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
                              CodeGenTeams);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeParallelForDirective &S) {
  // Emit SPMD target teams distribute parallel for region as a standalone
  // region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
    const OMPTargetTeamsDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetTeamsDistributeParallelForSimdRegion(
    CodeGenFunction &CGF,
    const OMPTargetTeamsDistributeParallelForSimdDirective &S,
    PrePostActionTy &Action) {
  Action.Enter(CGF);
  auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };

  // Emit teams region as a standalone region.
  auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
                                                 PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
  };

  emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
                              CodeGenTeams);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  // Emit SPMD target teams distribute parallel for simd region as a standalone
  // region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
                                                   S.getCancelRegion());
}

void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_cancel) {
      IfCond = C->getCondition();
      break;
    }
  }
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    // TODO: This check is necessary as we only generate `omp parallel` through
    // the OpenMPIRBuilder for now.
    if (S.getCancelRegion() == OMPD_parallel) {
      llvm::Value *IfCondition = nullptr;
      if (IfCond)
        IfCondition = EmitScalarExpr(IfCond,
                                     /*IgnoreResultAssign=*/true);
      return Builder.restoreIP(
          OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion()));
    }
  }

  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task ||
      Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
      Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
    return ReturnBlock;
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
         Kind == OMPD_distribute_parallel_for ||
         Kind == OMPD_target_parallel_for ||
         Kind == OMPD_teams_distribute_parallel_for ||
         Kind == OMPD_target_teams_distribute_parallel_for);
  return OMPCancelStack.getExitBlock();
}

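// For illustration, a cancellation request such as
//
//   #pragma omp parallel
//   {
//     #pragma omp cancel parallel if(err)
//   }
//
// lowers (outside the OpenMPIRBuilder path) to a guarded __kmpc_cancel call;
// when the runtime reports active cancellation, control branches to the
// destination computed by getOMPCancelDestination, i.e. the return block for
// parallel/task-like regions or the cancel-exit block for worksharing loops.
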
void CodeGenFunction::EmitOMPUseDevicePtrClause(
    const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  auto OrigVarIt = C.varlist_begin();
  auto InitIt = C.inits().begin();
  for (const Expr *PvtVarIt : C.private_copies()) {
    const auto *OrigVD =
        cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
    const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
    const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());

    // In order to identify the right initializer we need to match the
    // declaration used by the mapping logic. In some cases we may get an
    // OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    bool IsRegistered = PrivateScope.addPrivate(
        OrigVD, [this, OrigVD, InitAddrIt, InitVD, PvtVD]() {
          // Initialize the temporary initialization variable with the address
          // we get from the runtime library. We have to cast the source
          // address because it is always a void *. References are materialized
          // in the privatization scope, so the initialization here disregards
          // the fact the original variable is a reference.
          QualType AddrQTy = getContext().getPointerType(
              OrigVD->getType().getNonReferenceType());
          llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
          Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
          setAddrOfLocalVar(InitVD, InitAddr);

          // Emit the private declaration; it will be initialized by the
          // temporary declaration we just added to the local declarations map.
          EmitDecl(*PvtVD);

          // The initialization variable has served its purpose in the emission
          // of the previous declaration, so we don't need it anymore.
          LocalDeclMap.erase(InitVD);

          // Return the address of the private variable.
          return GetAddrOfLocalVar(PvtVD);
        });
    assert(IsRegistered && "firstprivate var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    ++OrigVarIt;
    ++InitIt;
  }
}

static const VarDecl *getBaseDecl(const Expr *Ref) {
  const Expr *Base = Ref->IgnoreParenImpCasts();
  while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
    Base = OASE->getBase()->IgnoreParenImpCasts();
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
    Base = ASE->getBase()->IgnoreParenImpCasts();
  return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
}

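// For illustration (launch_kernel is a hypothetical user function, not part
// of this file):
//
//   int *p = host_buffer;
//   #pragma omp target data map(to: p[0:N]) use_device_ptr(p)
//   launch_kernel(p); // here 'p' holds the device address of the mapping
//
// Inside the region the clause codegen above replaces 'p' with a private copy
// initialized from the device address that the mapping runtime recorded in
// CaptureDeviceAddrMap.
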
void CodeGenFunction::EmitOMPUseDeviceAddrClause(
    const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
  for (const Expr *Ref : C.varlists()) {
    const VarDecl *OrigVD = getBaseDecl(Ref);
    if (!Processed.insert(OrigVD).second)
      continue;
    // In order to identify the right initializer we need to match the
    // declaration used by the mapping logic. In some cases we may get an
    // OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    Address PrivAddr = InitAddrIt->getSecond();
    // For DeclRefExprs and variable-length arrays we need to load the pointer
    // for correct mapping, since the pointer to the data was passed to the
    // runtime.
    if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
        MatchingVD->getType()->isArrayType())
      PrivAddr =
          EmitLoadOfPointer(PrivAddr, getContext()
                                          .getPointerType(OrigVD->getType())
                                          ->castAs<PointerType>());
    llvm::Type *RealTy =
        ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
            ->getPointerTo();
    PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);

    (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
  }
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
                                       /*SeparateBeginEndCalls=*/true);

  // Create a pre/post action to signal the privatization of the device
  // pointer. This action can be replaced by the OpenMP runtime code generation
  // to deactivate privatization.
  bool PrivatizeDevicePointers = false;
  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);

  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
      CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    };

    // Codegen that selects whether to generate the privatization code or not.
    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
                          &InnermostCodeGen](CodeGenFunction &CGF,
                                             PrePostActionTy &Action) {
      RegionCodeGenTy RCG(InnermostCodeGen);
      PrivatizeDevicePointers = false;

      // Call the pre-action to change the status of PrivatizeDevicePointers
      // if needed.
      Action.Enter(CGF);

      if (PrivatizeDevicePointers) {
        OMPPrivateScope PrivateScope(CGF);
        // Emit all instances of the use_device_ptr clause.
        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
                                        Info.CaptureDeviceAddrMap);
        for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
          CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
                                         Info.CaptureDeviceAddrMap);
        (void)PrivateScope.Privatize();
        RCG(CGF);
      } else {
        OMPLexicalScope Scope(CGF, S, OMPD_unknown);
        RCG(CGF);
      }
    };

    // Forward the provided action to the privatization codegen.
    RegionCodeGenTy PrivRCG(PrivCodeGen);
    PrivRCG.setAction(Action);

    // Although the body of the region is emitted as an inlined directive, we
    // don't use an inline scope: changes to the references inside the region
    // are expected to be visible outside, so we do not privatize them.
    OMPLexicalScope Scope(CGF, S);
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
                                                    PrivRCG);
  };

  RegionCodeGenTy RCG(CodeGen);

  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
    RCG(*this);
    return;
  }

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  // Set the action to signal privatization of device pointers.
  RCG.setAction(PrivAction);

  // Emit region code.
  CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
                                             Info);
}

void CodeGenFunction::EmitOMPTargetEnterDataDirective(
    const OMPTargetEnterDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond,
                                                      Device);
}

void CodeGenFunction::EmitOMPTargetExitDataDirective(
    const OMPTargetExitDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond,
                                                      Device);
}

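// For illustration, a standalone construct such as
//
//   #pragma omp target enter data map(to: a) device(dev) if(cond)
//
// becomes a single call into the offloading runtime (a __tgt_target_data_*
// entry point) that performs the mapping, with 'if' and 'device' evaluated at
// the call site; 'target exit data' is emitted the same way with the opposite
// map kinds.
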
static void emitTargetParallelRegion(CodeGenFunction &CGF,
                                     const OMPTargetParallelDirective &S,
                                     PrePostActionTy &Action) {
  // Get the captured statement associated with the 'parallel' region.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  Action.Enter(CGF);
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    // TODO: Add support for clauses.
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
                                 emitEmptyBoundParameters);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelDirective(
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetParallelForRegion(CodeGenFunction &CGF,
                                        const OMPTargetParallelForDirective &S,
                                        PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPCancelStackRAII CancelRegion(
        CGF, OMPD_target_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForDirective &S) {
  // Emit SPMD target parallel for region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForDirective(
    const OMPTargetParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

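// A note on the combined 'target parallel for' emission above: the region is
// split into an implicit 'parallel' outlining whose body is the worksharing
// loop, so e.g.
//
//   #pragma omp target parallel for
//   for (int i = 0; i < N; ++i)
//     a[i] += 1;
//
// handles loop bounds, scheduling, and cancellation exactly like a standalone
// 'parallel for' that happens to run inside the offloaded region.
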
static void
emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
                                const OMPTargetParallelForSimdDirective &S,
                                PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForSimdDirective &S) {
  // Emit SPMD target parallel for simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
    const OMPTargetParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

/// Map the OpenMP loop helper variable to the corresponding implicit
/// parameter of the outlined function in the given privatization scope.
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
  Privates.addPrivate(VDecl,
                      [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
}

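// For illustration, a loop such as
//
//   #pragma omp taskloop grainsize(64)
//   for (int i = 0; i < N; ++i)
//     work(i); // work() being any user function
//
// is packaged below like an 'omp task' whose body re-derives its chunk bounds
// from the lower-bound/upper-bound/stride parameters mapped via mapParam; the
// grainsize/num_tasks choice is carried in Data.Schedule and ultimately
// forwarded to the runtime's __kmpc_taskloop entry point.
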
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
  Address CapturedStruct = Address::invalid();
  {
    OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
    CapturedStruct = GenerateCapturedStmtArgument(*CS);
  }
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if taskloop must be emitted without taskgroup.
  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
  // TODO: Check if we should emit tied or untied task.
  Data.Tied = true;
  // Set scheduling for taskloop.
  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
    // grainsize clause
    Data.Schedule.setInt(/*IntVal=*/false);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
    // num_tasks clause
    Data.Schedule.setInt(/*IntVal=*/true);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
  }

  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    OMPLoopScope PreInitScope(CGF, S);
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
      ContBlock = CGF.createBasicBlock("taskloop.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    (void)CGF.EmitOMPLinearClauseInit(S);

    OMPPrivateScope LoopScope(CGF);
    // Emit helper vars inits.
    enum { LowerBound = 5, UpperBound, Stride, LastIter };
    auto *I = CS->getCapturedDecl()->param_begin();
    auto *LBP = std::next(I, LowerBound);
    auto *UBP = std::next(I, UpperBound);
    auto *STP = std::next(I, Stride);
    auto *LIP = std::next(I, LastIter);
    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
             LoopScope);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate iterations count on
    // each iteration (e.g., it is foldable into a constant).
    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    {
      OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
      emitCommonSimdLoop(
          CGF, S,
          [&S](CodeGenFunction &CGF, PrePostActionTy &) {
            if (isOpenMPSimdDirective(S.getDirectiveKind()))
              CGF.EmitOMPSimdInit(S);
          },
          [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
            CGF.EmitOMPInnerLoop(
                S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                [&S](CodeGenFunction &CGF) {
                  emitOMPLoopBodyWithStopPoint(CGF, S,
                                               CodeGenFunction::JumpDest());
                },
                [](CodeGenFunction &) {});
          });
    }
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivateClause) {
      CGF.EmitOMPLastprivateClauseFinal(
          S, isOpenMPSimdDirective(S.getDirectiveKind()),
          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
              (*LIP)->getType(), S.getBeginLoc())));
    }
    CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
                               (*LIP)->getType(), S.getBeginLoc()));
    });
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
                      &Data](CodeGenFunction &CGF, PrePostActionTy &) {
      OMPLoopScope PreInitScope(CGF, S);
      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
                                                  OutlinedFn, SharedsTy,
                                                  CapturedStruct, IfCond, Data);
    };
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
                                                    CodeGen);
  };
  if (Data.Nogroup) {
    EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
  } else {
    CGM.getOpenMPRuntime().emitTaskgroupRegion(
        *this,
        [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
                                        PrePostActionTy &Action) {
          Action.Enter(CGF);
          CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
                                        Data);
        },
        S.getBeginLoc());
  }
}

void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
    const OMPTaskLoopSimdDirective &S) {
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  OMPLexicalScope Scope(*this, S);
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
    const OMPMasterTaskLoopDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    EmitOMPTaskLoopBasedDirective(S);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
    const OMPMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    EmitOMPTaskLoopBasedDirective(S);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  OMPLexicalScope Scope(*this, S);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

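// A note on the 'master taskloop' variants above: the taskloop emission is
// simply nested inside emitMasterRegion, so only the master thread of the
// enclosing (or implicit) parallel region generates the tasks; the 'parallel
// master taskloop' forms below additionally wrap this in the usual parallel
// outlining.
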
void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
    const OMPParallelMasterTaskLoopDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
    const OMPParallelMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond,
                                                      Device);
}

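// For illustration, a directive such as
//
//   #pragma omp target update from(a) device(dev) if(cond)
//
// goes through the same standalone data-call path as enter/exit data: a
// single runtime call performs the requested data motion, and the call is
// guarded so that no transfer is issued when 'cond' evaluates to false.
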
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
    const OMPExecutableDirective &D) {
  if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
    EmitOMPScanDirective(*SD);
    return;
  }
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
    OMPPrivateScope GlobalsScope(CGF);
    if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
      // Capture global firstprivates to avoid a crash.
      for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
        for (const Expr *Ref : C->varlists()) {
          const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
          if (!DRE)
            continue;
          const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
          if (!VD || VD->hasLocalStorage())
            continue;
          if (!CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(Ref);
            GlobalsScope.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
        }
      }
    }
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      (void)GlobalsScope.Privatize();
      ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            GlobalsScope.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getLoopsNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in
              // clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      (void)GlobalsScope.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  if (D.getDirectiveKind() == OMPD_atomic ||
      D.getDirectiveKind() == OMPD_critical ||
      D.getDirectiveKind() == OMPD_section ||
      D.getDirectiveKind() == OMPD_master ||
      D.getDirectiveKind() == OMPD_masked) {
    EmitStmt(D.getAssociatedStmt());
  } else {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
    OMPSimdLexicalScope Scope(*this, D);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        *this,
        isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                    : D.getDirectiveKind(),
        CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, D);
}