1 //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This contains code to emit OpenMP nodes as LLVM code. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "CGCleanup.h" 14 #include "CGOpenMPRuntime.h" 15 #include "CodeGenFunction.h" 16 #include "CodeGenModule.h" 17 #include "TargetInfo.h" 18 #include "clang/AST/ASTContext.h" 19 #include "clang/AST/Attr.h" 20 #include "clang/AST/DeclOpenMP.h" 21 #include "clang/AST/OpenMPClause.h" 22 #include "clang/AST/Stmt.h" 23 #include "clang/AST/StmtOpenMP.h" 24 #include "clang/AST/StmtVisitor.h" 25 #include "clang/Basic/OpenMPKinds.h" 26 #include "clang/Basic/PrettyStackTrace.h" 27 #include "llvm/Frontend/OpenMP/OMPConstants.h" 28 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 29 #include "llvm/IR/Constants.h" 30 #include "llvm/IR/Instructions.h" 31 #include "llvm/Support/AtomicOrdering.h" 32 using namespace clang; 33 using namespace CodeGen; 34 using namespace llvm::omp; 35 36 static const VarDecl *getBaseDecl(const Expr *Ref); 37 38 namespace { 39 /// Lexical scope for OpenMP executable constructs, that handles correct codegen 40 /// for captured expressions. 41 class OMPLexicalScope : public CodeGenFunction::LexicalScope { 42 void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 43 for (const auto *C : S.clauses()) { 44 if (const auto *CPI = OMPClauseWithPreInit::get(C)) { 45 if (const auto *PreInit = 46 cast_or_null<DeclStmt>(CPI->getPreInitStmt())) { 47 for (const auto *I : PreInit->decls()) { 48 if (!I->hasAttr<OMPCaptureNoInitAttr>()) { 49 CGF.EmitVarDecl(cast<VarDecl>(*I)); 50 } else { 51 CodeGenFunction::AutoVarEmission Emission = 52 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I)); 53 CGF.EmitAutoVarCleanups(Emission); 54 } 55 } 56 } 57 } 58 } 59 } 60 CodeGenFunction::OMPPrivateScope InlinedShareds; 61 62 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) { 63 return CGF.LambdaCaptureFields.lookup(VD) || 64 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) || 65 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) && 66 cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD)); 67 } 68 69 public: 70 OMPLexicalScope( 71 CodeGenFunction &CGF, const OMPExecutableDirective &S, 72 const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None, 73 const bool EmitPreInitStmt = true) 74 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()), 75 InlinedShareds(CGF) { 76 if (EmitPreInitStmt) 77 emitPreInitStmt(CGF, S); 78 if (!CapturedRegion.hasValue()) 79 return; 80 assert(S.hasAssociatedStmt() && 81 "Expected associated statement for inlined directive."); 82 const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion); 83 for (const auto &C : CS->captures()) { 84 if (C.capturesVariable() || C.capturesVariableByCopy()) { 85 auto *VD = C.getCapturedVar(); 86 assert(VD == VD->getCanonicalDecl() && 87 "Canonical decl must be captured."); 88 DeclRefExpr DRE( 89 CGF.getContext(), const_cast<VarDecl *>(VD), 90 isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo && 91 InlinedShareds.isGlobalVarCaptured(VD)), 92 VD->getType().getNonReferenceType(), VK_LValue, C.getLocation()); 93 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address { 94 return CGF.EmitLValue(&DRE).getAddress(CGF); 95 }); 96 } 97 } 98 (void)InlinedShareds.Privatize(); 99 } 100 }; 101 102 /// Lexical scope for OpenMP parallel construct, that handles correct codegen 103 /// for captured expressions. 104 class OMPParallelScope final : public OMPLexicalScope { 105 bool EmitPreInitStmt(const OMPExecutableDirective &S) { 106 OpenMPDirectiveKind Kind = S.getDirectiveKind(); 107 return !(isOpenMPTargetExecutionDirective(Kind) || 108 isOpenMPLoopBoundSharingDirective(Kind)) && 109 isOpenMPParallelDirective(Kind); 110 } 111 112 public: 113 OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S) 114 : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None, 115 EmitPreInitStmt(S)) {} 116 }; 117 118 /// Lexical scope for OpenMP teams construct, that handles correct codegen 119 /// for captured expressions. 120 class OMPTeamsScope final : public OMPLexicalScope { 121 bool EmitPreInitStmt(const OMPExecutableDirective &S) { 122 OpenMPDirectiveKind Kind = S.getDirectiveKind(); 123 return !isOpenMPTargetExecutionDirective(Kind) && 124 isOpenMPTeamsDirective(Kind); 125 } 126 127 public: 128 OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S) 129 : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None, 130 EmitPreInitStmt(S)) {} 131 }; 132 133 /// Private scope for OpenMP loop-based directives, that supports capturing 134 /// of used expression from loop statement. 135 class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { 136 void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) { 137 const DeclStmt *PreInits; 138 CodeGenFunction::OMPMapVars PreCondVars; 139 if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) { 140 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 141 for (const auto *E : LD->counters()) { 142 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 143 EmittedAsPrivate.insert(VD->getCanonicalDecl()); 144 (void)PreCondVars.setVarAddr( 145 CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType())); 146 } 147 // Mark private vars as undefs. 148 for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) { 149 for (const Expr *IRef : C->varlists()) { 150 const auto *OrigVD = 151 cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl()); 152 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 153 (void)PreCondVars.setVarAddr( 154 CGF, OrigVD, 155 Address(llvm::UndefValue::get(CGF.ConvertTypeForMem( 156 CGF.getContext().getPointerType( 157 OrigVD->getType().getNonReferenceType()))), 158 CGF.getContext().getDeclAlign(OrigVD))); 159 } 160 } 161 } 162 (void)PreCondVars.apply(CGF); 163 // Emit init, __range and __end variables for C++ range loops. 164 (void)OMPLoopBasedDirective::doForAllLoops( 165 LD->getInnermostCapturedStmt()->getCapturedStmt(), 166 /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(), 167 [&CGF](unsigned Cnt, const Stmt *CurStmt) { 168 if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) { 169 if (const Stmt *Init = CXXFor->getInit()) 170 CGF.EmitStmt(Init); 171 CGF.EmitStmt(CXXFor->getRangeStmt()); 172 CGF.EmitStmt(CXXFor->getEndStmt()); 173 } 174 return false; 175 }); 176 PreInits = cast_or_null<DeclStmt>(LD->getPreInits()); 177 } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) { 178 PreInits = cast_or_null<DeclStmt>(Tile->getPreInits()); 179 } else { 180 llvm_unreachable("Unknown loop-based directive kind."); 181 } 182 if (PreInits) { 183 for (const auto *I : PreInits->decls()) 184 CGF.EmitVarDecl(cast<VarDecl>(*I)); 185 } 186 PreCondVars.restore(CGF); 187 } 188 189 public: 190 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) 191 : CodeGenFunction::RunCleanupsScope(CGF) { 192 emitPreInitStmt(CGF, S); 193 } 194 }; 195 196 class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope { 197 CodeGenFunction::OMPPrivateScope InlinedShareds; 198 199 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) { 200 return CGF.LambdaCaptureFields.lookup(VD) || 201 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) || 202 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) && 203 cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD)); 204 } 205 206 public: 207 OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S) 208 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()), 209 InlinedShareds(CGF) { 210 for (const auto *C : S.clauses()) { 211 if (const auto *CPI = OMPClauseWithPreInit::get(C)) { 212 if (const auto *PreInit = 213 cast_or_null<DeclStmt>(CPI->getPreInitStmt())) { 214 for (const auto *I : PreInit->decls()) { 215 if (!I->hasAttr<OMPCaptureNoInitAttr>()) { 216 CGF.EmitVarDecl(cast<VarDecl>(*I)); 217 } else { 218 CodeGenFunction::AutoVarEmission Emission = 219 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I)); 220 CGF.EmitAutoVarCleanups(Emission); 221 } 222 } 223 } 224 } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) { 225 for (const Expr *E : UDP->varlists()) { 226 const Decl *D = cast<DeclRefExpr>(E)->getDecl(); 227 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D)) 228 CGF.EmitVarDecl(*OED); 229 } 230 } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) { 231 for (const Expr *E : UDP->varlists()) { 232 const Decl *D = getBaseDecl(E); 233 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D)) 234 CGF.EmitVarDecl(*OED); 235 } 236 } 237 } 238 if (!isOpenMPSimdDirective(S.getDirectiveKind())) 239 CGF.EmitOMPPrivateClause(S, InlinedShareds); 240 if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) { 241 if (const Expr *E = TG->getReductionRef()) 242 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())); 243 } 244 // Temp copy arrays for inscan reductions should not be emitted as they are 245 // not used in simd only mode. 246 llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps; 247 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 248 if (C->getModifier() != OMPC_REDUCTION_inscan) 249 continue; 250 for (const Expr *E : C->copy_array_temps()) 251 CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl()); 252 } 253 const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt()); 254 while (CS) { 255 for (auto &C : CS->captures()) { 256 if (C.capturesVariable() || C.capturesVariableByCopy()) { 257 auto *VD = C.getCapturedVar(); 258 if (CopyArrayTemps.contains(VD)) 259 continue; 260 assert(VD == VD->getCanonicalDecl() && 261 "Canonical decl must be captured."); 262 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD), 263 isCapturedVar(CGF, VD) || 264 (CGF.CapturedStmtInfo && 265 InlinedShareds.isGlobalVarCaptured(VD)), 266 VD->getType().getNonReferenceType(), VK_LValue, 267 C.getLocation()); 268 InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address { 269 return CGF.EmitLValue(&DRE).getAddress(CGF); 270 }); 271 } 272 } 273 CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt()); 274 } 275 (void)InlinedShareds.Privatize(); 276 } 277 }; 278 279 } // namespace 280 281 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 282 const OMPExecutableDirective &S, 283 const RegionCodeGenTy &CodeGen); 284 285 LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) { 286 if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) { 287 if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) { 288 OrigVD = OrigVD->getCanonicalDecl(); 289 bool IsCaptured = 290 LambdaCaptureFields.lookup(OrigVD) || 291 (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) || 292 (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)); 293 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured, 294 OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc()); 295 return EmitLValue(&DRE); 296 } 297 } 298 return EmitLValue(E); 299 } 300 301 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) { 302 ASTContext &C = getContext(); 303 llvm::Value *Size = nullptr; 304 auto SizeInChars = C.getTypeSizeInChars(Ty); 305 if (SizeInChars.isZero()) { 306 // getTypeSizeInChars() returns 0 for a VLA. 307 while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) { 308 VlaSizePair VlaSize = getVLASize(VAT); 309 Ty = VlaSize.Type; 310 Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) 311 : VlaSize.NumElts; 312 } 313 SizeInChars = C.getTypeSizeInChars(Ty); 314 if (SizeInChars.isZero()) 315 return llvm::ConstantInt::get(SizeTy, /*V=*/0); 316 return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars)); 317 } 318 return CGM.getSize(SizeInChars); 319 } 320 321 void CodeGenFunction::GenerateOpenMPCapturedVars( 322 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) { 323 const RecordDecl *RD = S.getCapturedRecordDecl(); 324 auto CurField = RD->field_begin(); 325 auto CurCap = S.captures().begin(); 326 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(), 327 E = S.capture_init_end(); 328 I != E; ++I, ++CurField, ++CurCap) { 329 if (CurField->hasCapturedVLAType()) { 330 const VariableArrayType *VAT = CurField->getCapturedVLAType(); 331 llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()]; 332 CapturedVars.push_back(Val); 333 } else if (CurCap->capturesThis()) { 334 CapturedVars.push_back(CXXThisValue); 335 } else if (CurCap->capturesVariableByCopy()) { 336 llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation()); 337 338 // If the field is not a pointer, we need to save the actual value 339 // and load it as a void pointer. 340 if (!CurField->getType()->isAnyPointerType()) { 341 ASTContext &Ctx = getContext(); 342 Address DstAddr = CreateMemTemp( 343 Ctx.getUIntPtrType(), 344 Twine(CurCap->getCapturedVar()->getName(), ".casted")); 345 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType()); 346 347 llvm::Value *SrcAddrVal = EmitScalarConversion( 348 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()), 349 Ctx.getPointerType(CurField->getType()), CurCap->getLocation()); 350 LValue SrcLV = 351 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType()); 352 353 // Store the value using the source type pointer. 354 EmitStoreThroughLValue(RValue::get(CV), SrcLV); 355 356 // Load the value using the destination type pointer. 357 CV = EmitLoadOfScalar(DstLV, CurCap->getLocation()); 358 } 359 CapturedVars.push_back(CV); 360 } else { 361 assert(CurCap->capturesVariable() && "Expected capture by reference."); 362 CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer()); 363 } 364 } 365 } 366 367 static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc, 368 QualType DstType, StringRef Name, 369 LValue AddrLV) { 370 ASTContext &Ctx = CGF.getContext(); 371 372 llvm::Value *CastedPtr = CGF.EmitScalarConversion( 373 AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(), 374 Ctx.getPointerType(DstType), Loc); 375 Address TmpAddr = 376 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType)) 377 .getAddress(CGF); 378 return TmpAddr; 379 } 380 381 static QualType getCanonicalParamType(ASTContext &C, QualType T) { 382 if (T->isLValueReferenceType()) 383 return C.getLValueReferenceType( 384 getCanonicalParamType(C, T.getNonReferenceType()), 385 /*SpelledAsLValue=*/false); 386 if (T->isPointerType()) 387 return C.getPointerType(getCanonicalParamType(C, T->getPointeeType())); 388 if (const ArrayType *A = T->getAsArrayTypeUnsafe()) { 389 if (const auto *VLA = dyn_cast<VariableArrayType>(A)) 390 return getCanonicalParamType(C, VLA->getElementType()); 391 if (!A->isVariablyModifiedType()) 392 return C.getCanonicalType(T); 393 } 394 return C.getCanonicalParamType(T); 395 } 396 397 namespace { 398 /// Contains required data for proper outlined function codegen. 399 struct FunctionOptions { 400 /// Captured statement for which the function is generated. 401 const CapturedStmt *S = nullptr; 402 /// true if cast to/from UIntPtr is required for variables captured by 403 /// value. 404 const bool UIntPtrCastRequired = true; 405 /// true if only casted arguments must be registered as local args or VLA 406 /// sizes. 407 const bool RegisterCastedArgsOnly = false; 408 /// Name of the generated function. 409 const StringRef FunctionName; 410 /// Location of the non-debug version of the outlined function. 411 SourceLocation Loc; 412 explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired, 413 bool RegisterCastedArgsOnly, StringRef FunctionName, 414 SourceLocation Loc) 415 : S(S), UIntPtrCastRequired(UIntPtrCastRequired), 416 RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly), 417 FunctionName(FunctionName), Loc(Loc) {} 418 }; 419 } // namespace 420 421 static llvm::Function *emitOutlinedFunctionPrologue( 422 CodeGenFunction &CGF, FunctionArgList &Args, 423 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> 424 &LocalAddrs, 425 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> 426 &VLASizes, 427 llvm::Value *&CXXThisValue, const FunctionOptions &FO) { 428 const CapturedDecl *CD = FO.S->getCapturedDecl(); 429 const RecordDecl *RD = FO.S->getCapturedRecordDecl(); 430 assert(CD->hasBody() && "missing CapturedDecl body"); 431 432 CXXThisValue = nullptr; 433 // Build the argument list. 434 CodeGenModule &CGM = CGF.CGM; 435 ASTContext &Ctx = CGM.getContext(); 436 FunctionArgList TargetArgs; 437 Args.append(CD->param_begin(), 438 std::next(CD->param_begin(), CD->getContextParamPosition())); 439 TargetArgs.append( 440 CD->param_begin(), 441 std::next(CD->param_begin(), CD->getContextParamPosition())); 442 auto I = FO.S->captures().begin(); 443 FunctionDecl *DebugFunctionDecl = nullptr; 444 if (!FO.UIntPtrCastRequired) { 445 FunctionProtoType::ExtProtoInfo EPI; 446 QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI); 447 DebugFunctionDecl = FunctionDecl::Create( 448 Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(), 449 SourceLocation(), DeclarationName(), FunctionTy, 450 Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static, 451 /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false); 452 } 453 for (const FieldDecl *FD : RD->fields()) { 454 QualType ArgType = FD->getType(); 455 IdentifierInfo *II = nullptr; 456 VarDecl *CapVar = nullptr; 457 458 // If this is a capture by copy and the type is not a pointer, the outlined 459 // function argument type should be uintptr and the value properly casted to 460 // uintptr. This is necessary given that the runtime library is only able to 461 // deal with pointers. We can pass in the same way the VLA type sizes to the 462 // outlined function. 463 if (FO.UIntPtrCastRequired && 464 ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) || 465 I->capturesVariableArrayType())) 466 ArgType = Ctx.getUIntPtrType(); 467 468 if (I->capturesVariable() || I->capturesVariableByCopy()) { 469 CapVar = I->getCapturedVar(); 470 II = CapVar->getIdentifier(); 471 } else if (I->capturesThis()) { 472 II = &Ctx.Idents.get("this"); 473 } else { 474 assert(I->capturesVariableArrayType()); 475 II = &Ctx.Idents.get("vla"); 476 } 477 if (ArgType->isVariablyModifiedType()) 478 ArgType = getCanonicalParamType(Ctx, ArgType); 479 VarDecl *Arg; 480 if (DebugFunctionDecl && (CapVar || I->capturesThis())) { 481 Arg = ParmVarDecl::Create( 482 Ctx, DebugFunctionDecl, 483 CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(), 484 CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType, 485 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr); 486 } else { 487 Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(), 488 II, ArgType, ImplicitParamDecl::Other); 489 } 490 Args.emplace_back(Arg); 491 // Do not cast arguments if we emit function with non-original types. 492 TargetArgs.emplace_back( 493 FO.UIntPtrCastRequired 494 ? Arg 495 : CGM.getOpenMPRuntime().translateParameter(FD, Arg)); 496 ++I; 497 } 498 Args.append( 499 std::next(CD->param_begin(), CD->getContextParamPosition() + 1), 500 CD->param_end()); 501 TargetArgs.append( 502 std::next(CD->param_begin(), CD->getContextParamPosition() + 1), 503 CD->param_end()); 504 505 // Create the function declaration. 506 const CGFunctionInfo &FuncInfo = 507 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs); 508 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); 509 510 auto *F = 511 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, 512 FO.FunctionName, &CGM.getModule()); 513 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); 514 if (CD->isNothrow()) 515 F->setDoesNotThrow(); 516 F->setDoesNotRecurse(); 517 518 // Generate the function. 519 CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs, 520 FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(), 521 FO.UIntPtrCastRequired ? FO.Loc 522 : CD->getBody()->getBeginLoc()); 523 unsigned Cnt = CD->getContextParamPosition(); 524 I = FO.S->captures().begin(); 525 for (const FieldDecl *FD : RD->fields()) { 526 // Do not map arguments if we emit function with non-original types. 527 Address LocalAddr(Address::invalid()); 528 if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) { 529 LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt], 530 TargetArgs[Cnt]); 531 } else { 532 LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]); 533 } 534 // If we are capturing a pointer by copy we don't need to do anything, just 535 // use the value that we get from the arguments. 536 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) { 537 const VarDecl *CurVD = I->getCapturedVar(); 538 if (!FO.RegisterCastedArgsOnly) 539 LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}}); 540 ++Cnt; 541 ++I; 542 continue; 543 } 544 545 LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(), 546 AlignmentSource::Decl); 547 if (FD->hasCapturedVLAType()) { 548 if (FO.UIntPtrCastRequired) { 549 ArgLVal = CGF.MakeAddrLValue( 550 castValueFromUintptr(CGF, I->getLocation(), FD->getType(), 551 Args[Cnt]->getName(), ArgLVal), 552 FD->getType(), AlignmentSource::Decl); 553 } 554 llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation()); 555 const VariableArrayType *VAT = FD->getCapturedVLAType(); 556 VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg); 557 } else if (I->capturesVariable()) { 558 const VarDecl *Var = I->getCapturedVar(); 559 QualType VarTy = Var->getType(); 560 Address ArgAddr = ArgLVal.getAddress(CGF); 561 if (ArgLVal.getType()->isLValueReferenceType()) { 562 ArgAddr = CGF.EmitLoadOfReference(ArgLVal); 563 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) { 564 assert(ArgLVal.getType()->isPointerType()); 565 ArgAddr = CGF.EmitLoadOfPointer( 566 ArgAddr, ArgLVal.getType()->castAs<PointerType>()); 567 } 568 if (!FO.RegisterCastedArgsOnly) { 569 LocalAddrs.insert( 570 {Args[Cnt], 571 {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}}); 572 } 573 } else if (I->capturesVariableByCopy()) { 574 assert(!FD->getType()->isAnyPointerType() && 575 "Not expecting a captured pointer."); 576 const VarDecl *Var = I->getCapturedVar(); 577 LocalAddrs.insert({Args[Cnt], 578 {Var, FO.UIntPtrCastRequired 579 ? castValueFromUintptr( 580 CGF, I->getLocation(), FD->getType(), 581 Args[Cnt]->getName(), ArgLVal) 582 : ArgLVal.getAddress(CGF)}}); 583 } else { 584 // If 'this' is captured, load it into CXXThisValue. 585 assert(I->capturesThis()); 586 CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation()); 587 LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}}); 588 } 589 ++Cnt; 590 ++I; 591 } 592 593 return F; 594 } 595 596 llvm::Function * 597 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, 598 SourceLocation Loc) { 599 assert( 600 CapturedStmtInfo && 601 "CapturedStmtInfo should be set when generating the captured function"); 602 const CapturedDecl *CD = S.getCapturedDecl(); 603 // Build the argument list. 604 bool NeedWrapperFunction = 605 getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo(); 606 FunctionArgList Args; 607 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs; 608 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes; 609 SmallString<256> Buffer; 610 llvm::raw_svector_ostream Out(Buffer); 611 Out << CapturedStmtInfo->getHelperName(); 612 if (NeedWrapperFunction) 613 Out << "_debug__"; 614 FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false, 615 Out.str(), Loc); 616 llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs, 617 VLASizes, CXXThisValue, FO); 618 CodeGenFunction::OMPPrivateScope LocalScope(*this); 619 for (const auto &LocalAddrPair : LocalAddrs) { 620 if (LocalAddrPair.second.first) { 621 LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() { 622 return LocalAddrPair.second.second; 623 }); 624 } 625 } 626 (void)LocalScope.Privatize(); 627 for (const auto &VLASizePair : VLASizes) 628 VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second; 629 PGO.assignRegionCounters(GlobalDecl(CD), F); 630 CapturedStmtInfo->EmitBody(*this, CD->getBody()); 631 (void)LocalScope.ForceCleanup(); 632 FinishFunction(CD->getBodyRBrace()); 633 if (!NeedWrapperFunction) 634 return F; 635 636 FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true, 637 /*RegisterCastedArgsOnly=*/true, 638 CapturedStmtInfo->getHelperName(), Loc); 639 CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true); 640 WrapperCGF.CapturedStmtInfo = CapturedStmtInfo; 641 Args.clear(); 642 LocalAddrs.clear(); 643 VLASizes.clear(); 644 llvm::Function *WrapperF = 645 emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes, 646 WrapperCGF.CXXThisValue, WrapperFO); 647 llvm::SmallVector<llvm::Value *, 4> CallArgs; 648 auto *PI = F->arg_begin(); 649 for (const auto *Arg : Args) { 650 llvm::Value *CallArg; 651 auto I = LocalAddrs.find(Arg); 652 if (I != LocalAddrs.end()) { 653 LValue LV = WrapperCGF.MakeAddrLValue( 654 I->second.second, 655 I->second.first ? I->second.first->getType() : Arg->getType(), 656 AlignmentSource::Decl); 657 if (LV.getType()->isAnyComplexType()) 658 LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 659 LV.getAddress(WrapperCGF), 660 PI->getType()->getPointerTo( 661 LV.getAddress(WrapperCGF).getAddressSpace()))); 662 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc()); 663 } else { 664 auto EI = VLASizes.find(Arg); 665 if (EI != VLASizes.end()) { 666 CallArg = EI->second.second; 667 } else { 668 LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg), 669 Arg->getType(), 670 AlignmentSource::Decl); 671 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc()); 672 } 673 } 674 CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType())); 675 ++PI; 676 } 677 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs); 678 WrapperCGF.FinishFunction(); 679 return WrapperF; 680 } 681 682 //===----------------------------------------------------------------------===// 683 // OpenMP Directive Emission 684 //===----------------------------------------------------------------------===// 685 void CodeGenFunction::EmitOMPAggregateAssign( 686 Address DestAddr, Address SrcAddr, QualType OriginalType, 687 const llvm::function_ref<void(Address, Address)> CopyGen) { 688 // Perform element-by-element initialization. 689 QualType ElementTy; 690 691 // Drill down to the base element type on both arrays. 692 const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe(); 693 llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr); 694 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType()); 695 696 llvm::Value *SrcBegin = SrcAddr.getPointer(); 697 llvm::Value *DestBegin = DestAddr.getPointer(); 698 // Cast from pointer to array type to pointer to single element. 699 llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements); 700 // The basic structure here is a while-do loop. 701 llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body"); 702 llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done"); 703 llvm::Value *IsEmpty = 704 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty"); 705 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); 706 707 // Enter the loop body, making that address the current address. 708 llvm::BasicBlock *EntryBB = Builder.GetInsertBlock(); 709 EmitBlock(BodyBB); 710 711 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy); 712 713 llvm::PHINode *SrcElementPHI = 714 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast"); 715 SrcElementPHI->addIncoming(SrcBegin, EntryBB); 716 Address SrcElementCurrent = 717 Address(SrcElementPHI, 718 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize)); 719 720 llvm::PHINode *DestElementPHI = 721 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast"); 722 DestElementPHI->addIncoming(DestBegin, EntryBB); 723 Address DestElementCurrent = 724 Address(DestElementPHI, 725 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize)); 726 727 // Emit copy. 728 CopyGen(DestElementCurrent, SrcElementCurrent); 729 730 // Shift the address forward by one element. 731 llvm::Value *DestElementNext = Builder.CreateConstGEP1_32( 732 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element"); 733 llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32( 734 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element"); 735 // Check whether we've reached the end. 736 llvm::Value *Done = 737 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done"); 738 Builder.CreateCondBr(Done, DoneBB, BodyBB); 739 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock()); 740 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock()); 741 742 // Done. 743 EmitBlock(DoneBB, /*IsFinished=*/true); 744 } 745 746 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr, 747 Address SrcAddr, const VarDecl *DestVD, 748 const VarDecl *SrcVD, const Expr *Copy) { 749 if (OriginalType->isArrayType()) { 750 const auto *BO = dyn_cast<BinaryOperator>(Copy); 751 if (BO && BO->getOpcode() == BO_Assign) { 752 // Perform simple memcpy for simple copying. 753 LValue Dest = MakeAddrLValue(DestAddr, OriginalType); 754 LValue Src = MakeAddrLValue(SrcAddr, OriginalType); 755 EmitAggregateAssign(Dest, Src, OriginalType); 756 } else { 757 // For arrays with complex element types perform element by element 758 // copying. 759 EmitOMPAggregateAssign( 760 DestAddr, SrcAddr, OriginalType, 761 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) { 762 // Working with the single array element, so have to remap 763 // destination and source variables to corresponding array 764 // elements. 765 CodeGenFunction::OMPPrivateScope Remap(*this); 766 Remap.addPrivate(DestVD, [DestElement]() { return DestElement; }); 767 Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; }); 768 (void)Remap.Privatize(); 769 EmitIgnoredExpr(Copy); 770 }); 771 } 772 } else { 773 // Remap pseudo source variable to private copy. 774 CodeGenFunction::OMPPrivateScope Remap(*this); 775 Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; }); 776 Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; }); 777 (void)Remap.Privatize(); 778 // Emit copying of the whole variable. 779 EmitIgnoredExpr(Copy); 780 } 781 } 782 783 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D, 784 OMPPrivateScope &PrivateScope) { 785 if (!HaveInsertPoint()) 786 return false; 787 bool DeviceConstTarget = 788 getLangOpts().OpenMPIsDevice && 789 isOpenMPTargetExecutionDirective(D.getDirectiveKind()); 790 bool FirstprivateIsLastprivate = false; 791 llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates; 792 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) { 793 for (const auto *D : C->varlists()) 794 Lastprivates.try_emplace( 795 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(), 796 C->getKind()); 797 } 798 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate; 799 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; 800 getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind()); 801 // Force emission of the firstprivate copy if the directive does not emit 802 // outlined function, like omp for, omp simd, omp distribute etc. 803 bool MustEmitFirstprivateCopy = 804 CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown; 805 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) { 806 const auto *IRef = C->varlist_begin(); 807 const auto *InitsRef = C->inits().begin(); 808 for (const Expr *IInit : C->private_copies()) { 809 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 810 bool ThisFirstprivateIsLastprivate = 811 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0; 812 const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD); 813 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 814 if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD && 815 !FD->getType()->isReferenceType() && 816 (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) { 817 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()); 818 ++IRef; 819 ++InitsRef; 820 continue; 821 } 822 // Do not emit copy for firstprivate constant variables in target regions, 823 // captured by reference. 824 if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) && 825 FD && FD->getType()->isReferenceType() && 826 (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) { 827 (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this, 828 OrigVD); 829 ++IRef; 830 ++InitsRef; 831 continue; 832 } 833 FirstprivateIsLastprivate = 834 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate; 835 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) { 836 const auto *VDInit = 837 cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl()); 838 bool IsRegistered; 839 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 840 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr, 841 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 842 LValue OriginalLVal; 843 if (!FD) { 844 // Check if the firstprivate variable is just a constant value. 845 ConstantEmission CE = tryEmitAsConstant(&DRE); 846 if (CE && !CE.isReference()) { 847 // Constant value, no need to create a copy. 848 ++IRef; 849 ++InitsRef; 850 continue; 851 } 852 if (CE && CE.isReference()) { 853 OriginalLVal = CE.getReferenceLValue(*this, &DRE); 854 } else { 855 assert(!CE && "Expected non-constant firstprivate."); 856 OriginalLVal = EmitLValue(&DRE); 857 } 858 } else { 859 OriginalLVal = EmitLValue(&DRE); 860 } 861 QualType Type = VD->getType(); 862 if (Type->isArrayType()) { 863 // Emit VarDecl with copy init for arrays. 864 // Get the address of the original variable captured in current 865 // captured region. 866 IsRegistered = PrivateScope.addPrivate( 867 OrigVD, [this, VD, Type, OriginalLVal, VDInit]() { 868 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 869 const Expr *Init = VD->getInit(); 870 if (!isa<CXXConstructExpr>(Init) || 871 isTrivialInitializer(Init)) { 872 // Perform simple memcpy. 873 LValue Dest = 874 MakeAddrLValue(Emission.getAllocatedAddress(), Type); 875 EmitAggregateAssign(Dest, OriginalLVal, Type); 876 } else { 877 EmitOMPAggregateAssign( 878 Emission.getAllocatedAddress(), 879 OriginalLVal.getAddress(*this), Type, 880 [this, VDInit, Init](Address DestElement, 881 Address SrcElement) { 882 // Clean up any temporaries needed by the 883 // initialization. 884 RunCleanupsScope InitScope(*this); 885 // Emit initialization for single element. 886 setAddrOfLocalVar(VDInit, SrcElement); 887 EmitAnyExprToMem(Init, DestElement, 888 Init->getType().getQualifiers(), 889 /*IsInitializer*/ false); 890 LocalDeclMap.erase(VDInit); 891 }); 892 } 893 EmitAutoVarCleanups(Emission); 894 return Emission.getAllocatedAddress(); 895 }); 896 } else { 897 Address OriginalAddr = OriginalLVal.getAddress(*this); 898 IsRegistered = 899 PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD, 900 ThisFirstprivateIsLastprivate, 901 OrigVD, &Lastprivates, IRef]() { 902 // Emit private VarDecl with copy init. 903 // Remap temp VDInit variable to the address of the original 904 // variable (for proper handling of captured global variables). 905 setAddrOfLocalVar(VDInit, OriginalAddr); 906 EmitDecl(*VD); 907 LocalDeclMap.erase(VDInit); 908 if (ThisFirstprivateIsLastprivate && 909 Lastprivates[OrigVD->getCanonicalDecl()] == 910 OMPC_LASTPRIVATE_conditional) { 911 // Create/init special variable for lastprivate conditionals. 912 Address VDAddr = 913 CGM.getOpenMPRuntime().emitLastprivateConditionalInit( 914 *this, OrigVD); 915 llvm::Value *V = EmitLoadOfScalar( 916 MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(), 917 AlignmentSource::Decl), 918 (*IRef)->getExprLoc()); 919 EmitStoreOfScalar(V, 920 MakeAddrLValue(VDAddr, (*IRef)->getType(), 921 AlignmentSource::Decl)); 922 LocalDeclMap.erase(VD); 923 setAddrOfLocalVar(VD, VDAddr); 924 return VDAddr; 925 } 926 return GetAddrOfLocalVar(VD); 927 }); 928 } 929 assert(IsRegistered && 930 "firstprivate var already registered as private"); 931 // Silence the warning about unused variable. 932 (void)IsRegistered; 933 } 934 ++IRef; 935 ++InitsRef; 936 } 937 } 938 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty(); 939 } 940 941 void CodeGenFunction::EmitOMPPrivateClause( 942 const OMPExecutableDirective &D, 943 CodeGenFunction::OMPPrivateScope &PrivateScope) { 944 if (!HaveInsertPoint()) 945 return; 946 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 947 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) { 948 auto IRef = C->varlist_begin(); 949 for (const Expr *IInit : C->private_copies()) { 950 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 951 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 952 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 953 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() { 954 // Emit private VarDecl with copy init. 955 EmitDecl(*VD); 956 return GetAddrOfLocalVar(VD); 957 }); 958 assert(IsRegistered && "private var already registered as private"); 959 // Silence the warning about unused variable. 960 (void)IsRegistered; 961 } 962 ++IRef; 963 } 964 } 965 } 966 967 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) { 968 if (!HaveInsertPoint()) 969 return false; 970 // threadprivate_var1 = master_threadprivate_var1; 971 // operator=(threadprivate_var2, master_threadprivate_var2); 972 // ... 973 // __kmpc_barrier(&loc, global_tid); 974 llvm::DenseSet<const VarDecl *> CopiedVars; 975 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr; 976 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) { 977 auto IRef = C->varlist_begin(); 978 auto ISrcRef = C->source_exprs().begin(); 979 auto IDestRef = C->destination_exprs().begin(); 980 for (const Expr *AssignOp : C->assignment_ops()) { 981 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 982 QualType Type = VD->getType(); 983 if (CopiedVars.insert(VD->getCanonicalDecl()).second) { 984 // Get the address of the master variable. If we are emitting code with 985 // TLS support, the address is passed from the master as field in the 986 // captured declaration. 987 Address MasterAddr = Address::invalid(); 988 if (getLangOpts().OpenMPUseTLS && 989 getContext().getTargetInfo().isTLSSupported()) { 990 assert(CapturedStmtInfo->lookup(VD) && 991 "Copyin threadprivates should have been captured!"); 992 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true, 993 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 994 MasterAddr = EmitLValue(&DRE).getAddress(*this); 995 LocalDeclMap.erase(VD); 996 } else { 997 MasterAddr = 998 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD) 999 : CGM.GetAddrOfGlobal(VD), 1000 getContext().getDeclAlign(VD)); 1001 } 1002 // Get the address of the threadprivate variable. 1003 Address PrivateAddr = EmitLValue(*IRef).getAddress(*this); 1004 if (CopiedVars.size() == 1) { 1005 // At first check if current thread is a master thread. If it is, no 1006 // need to copy data. 1007 CopyBegin = createBasicBlock("copyin.not.master"); 1008 CopyEnd = createBasicBlock("copyin.not.master.end"); 1009 // TODO: Avoid ptrtoint conversion. 1010 auto *MasterAddrInt = 1011 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy); 1012 auto *PrivateAddrInt = 1013 Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy); 1014 Builder.CreateCondBr( 1015 Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin, 1016 CopyEnd); 1017 EmitBlock(CopyBegin); 1018 } 1019 const auto *SrcVD = 1020 cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); 1021 const auto *DestVD = 1022 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 1023 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp); 1024 } 1025 ++IRef; 1026 ++ISrcRef; 1027 ++IDestRef; 1028 } 1029 } 1030 if (CopyEnd) { 1031 // Exit out of copying procedure for non-master thread. 1032 EmitBlock(CopyEnd, /*IsFinished=*/true); 1033 return true; 1034 } 1035 return false; 1036 } 1037 1038 bool CodeGenFunction::EmitOMPLastprivateClauseInit( 1039 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) { 1040 if (!HaveInsertPoint()) 1041 return false; 1042 bool HasAtLeastOneLastprivate = false; 1043 llvm::DenseSet<const VarDecl *> SIMDLCVs; 1044 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 1045 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 1046 for (const Expr *C : LoopDirective->counters()) { 1047 SIMDLCVs.insert( 1048 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 1049 } 1050 } 1051 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; 1052 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) { 1053 HasAtLeastOneLastprivate = true; 1054 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) && 1055 !getLangOpts().OpenMPSimd) 1056 break; 1057 const auto *IRef = C->varlist_begin(); 1058 const auto *IDestRef = C->destination_exprs().begin(); 1059 for (const Expr *IInit : C->private_copies()) { 1060 // Keep the address of the original variable for future update at the end 1061 // of the loop. 1062 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1063 // Taskloops do not require additional initialization, it is done in 1064 // runtime support library. 1065 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) { 1066 const auto *DestVD = 1067 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 1068 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() { 1069 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1070 /*RefersToEnclosingVariableOrCapture=*/ 1071 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1072 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 1073 return EmitLValue(&DRE).getAddress(*this); 1074 }); 1075 // Check if the variable is also a firstprivate: in this case IInit is 1076 // not generated. Initialization of this variable will happen in codegen 1077 // for 'firstprivate' clause. 1078 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) { 1079 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 1080 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C, 1081 OrigVD]() { 1082 if (C->getKind() == OMPC_LASTPRIVATE_conditional) { 1083 Address VDAddr = 1084 CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this, 1085 OrigVD); 1086 setAddrOfLocalVar(VD, VDAddr); 1087 return VDAddr; 1088 } 1089 // Emit private VarDecl with copy init. 1090 EmitDecl(*VD); 1091 return GetAddrOfLocalVar(VD); 1092 }); 1093 assert(IsRegistered && 1094 "lastprivate var already registered as private"); 1095 (void)IsRegistered; 1096 } 1097 } 1098 ++IRef; 1099 ++IDestRef; 1100 } 1101 } 1102 return HasAtLeastOneLastprivate; 1103 } 1104 1105 void CodeGenFunction::EmitOMPLastprivateClauseFinal( 1106 const OMPExecutableDirective &D, bool NoFinals, 1107 llvm::Value *IsLastIterCond) { 1108 if (!HaveInsertPoint()) 1109 return; 1110 // Emit following code: 1111 // if (<IsLastIterCond>) { 1112 // orig_var1 = private_orig_var1; 1113 // ... 1114 // orig_varn = private_orig_varn; 1115 // } 1116 llvm::BasicBlock *ThenBB = nullptr; 1117 llvm::BasicBlock *DoneBB = nullptr; 1118 if (IsLastIterCond) { 1119 // Emit implicit barrier if at least one lastprivate conditional is found 1120 // and this is not a simd mode. 1121 if (!getLangOpts().OpenMPSimd && 1122 llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(), 1123 [](const OMPLastprivateClause *C) { 1124 return C->getKind() == OMPC_LASTPRIVATE_conditional; 1125 })) { 1126 CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(), 1127 OMPD_unknown, 1128 /*EmitChecks=*/false, 1129 /*ForceSimpleCall=*/true); 1130 } 1131 ThenBB = createBasicBlock(".omp.lastprivate.then"); 1132 DoneBB = createBasicBlock(".omp.lastprivate.done"); 1133 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB); 1134 EmitBlock(ThenBB); 1135 } 1136 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; 1137 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates; 1138 if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) { 1139 auto IC = LoopDirective->counters().begin(); 1140 for (const Expr *F : LoopDirective->finals()) { 1141 const auto *D = 1142 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl(); 1143 if (NoFinals) 1144 AlreadyEmittedVars.insert(D); 1145 else 1146 LoopCountersAndUpdates[D] = F; 1147 ++IC; 1148 } 1149 } 1150 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) { 1151 auto IRef = C->varlist_begin(); 1152 auto ISrcRef = C->source_exprs().begin(); 1153 auto IDestRef = C->destination_exprs().begin(); 1154 for (const Expr *AssignOp : C->assignment_ops()) { 1155 const auto *PrivateVD = 1156 cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1157 QualType Type = PrivateVD->getType(); 1158 const auto *CanonicalVD = PrivateVD->getCanonicalDecl(); 1159 if (AlreadyEmittedVars.insert(CanonicalVD).second) { 1160 // If lastprivate variable is a loop control variable for loop-based 1161 // directive, update its value before copyin back to original 1162 // variable. 1163 if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) 1164 EmitIgnoredExpr(FinalExpr); 1165 const auto *SrcVD = 1166 cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); 1167 const auto *DestVD = 1168 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 1169 // Get the address of the private variable. 1170 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD); 1171 if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>()) 1172 PrivateAddr = 1173 Address(Builder.CreateLoad(PrivateAddr), 1174 CGM.getNaturalTypeAlignment(RefTy->getPointeeType())); 1175 // Store the last value to the private copy in the last iteration. 1176 if (C->getKind() == OMPC_LASTPRIVATE_conditional) 1177 CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate( 1178 *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD, 1179 (*IRef)->getExprLoc()); 1180 // Get the address of the original variable. 1181 Address OriginalAddr = GetAddrOfLocalVar(DestVD); 1182 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp); 1183 } 1184 ++IRef; 1185 ++ISrcRef; 1186 ++IDestRef; 1187 } 1188 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1189 EmitIgnoredExpr(PostUpdate); 1190 } 1191 if (IsLastIterCond) 1192 EmitBlock(DoneBB, /*IsFinished=*/true); 1193 } 1194 1195 void CodeGenFunction::EmitOMPReductionClauseInit( 1196 const OMPExecutableDirective &D, 1197 CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) { 1198 if (!HaveInsertPoint()) 1199 return; 1200 SmallVector<const Expr *, 4> Shareds; 1201 SmallVector<const Expr *, 4> Privates; 1202 SmallVector<const Expr *, 4> ReductionOps; 1203 SmallVector<const Expr *, 4> LHSs; 1204 SmallVector<const Expr *, 4> RHSs; 1205 OMPTaskDataTy Data; 1206 SmallVector<const Expr *, 4> TaskLHSs; 1207 SmallVector<const Expr *, 4> TaskRHSs; 1208 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) { 1209 if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan)) 1210 continue; 1211 Shareds.append(C->varlist_begin(), C->varlist_end()); 1212 Privates.append(C->privates().begin(), C->privates().end()); 1213 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 1214 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 1215 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 1216 if (C->getModifier() == OMPC_REDUCTION_task) { 1217 Data.ReductionVars.append(C->privates().begin(), C->privates().end()); 1218 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 1219 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 1220 Data.ReductionOps.append(C->reduction_ops().begin(), 1221 C->reduction_ops().end()); 1222 TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 1223 TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 1224 } 1225 } 1226 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps); 1227 unsigned Count = 0; 1228 auto *ILHS = LHSs.begin(); 1229 auto *IRHS = RHSs.begin(); 1230 auto *IPriv = Privates.begin(); 1231 for (const Expr *IRef : Shareds) { 1232 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl()); 1233 // Emit private VarDecl with reduction init. 1234 RedCG.emitSharedOrigLValue(*this, Count); 1235 RedCG.emitAggregateType(*this, Count); 1236 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD); 1237 RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(), 1238 RedCG.getSharedLValue(Count), 1239 [&Emission](CodeGenFunction &CGF) { 1240 CGF.EmitAutoVarInit(Emission); 1241 return true; 1242 }); 1243 EmitAutoVarCleanups(Emission); 1244 Address BaseAddr = RedCG.adjustPrivateAddress( 1245 *this, Count, Emission.getAllocatedAddress()); 1246 bool IsRegistered = PrivateScope.addPrivate( 1247 RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; }); 1248 assert(IsRegistered && "private var already registered as private"); 1249 // Silence the warning about unused variable. 1250 (void)IsRegistered; 1251 1252 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 1253 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 1254 QualType Type = PrivateVD->getType(); 1255 bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef); 1256 if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) { 1257 // Store the address of the original variable associated with the LHS 1258 // implicit variable. 1259 PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() { 1260 return RedCG.getSharedLValue(Count).getAddress(*this); 1261 }); 1262 PrivateScope.addPrivate( 1263 RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); }); 1264 } else if ((isaOMPArraySectionExpr && Type->isScalarType()) || 1265 isa<ArraySubscriptExpr>(IRef)) { 1266 // Store the address of the original variable associated with the LHS 1267 // implicit variable. 1268 PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() { 1269 return RedCG.getSharedLValue(Count).getAddress(*this); 1270 }); 1271 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() { 1272 return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD), 1273 ConvertTypeForMem(RHSVD->getType()), 1274 "rhs.begin"); 1275 }); 1276 } else { 1277 QualType Type = PrivateVD->getType(); 1278 bool IsArray = getContext().getAsArrayType(Type) != nullptr; 1279 Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this); 1280 // Store the address of the original variable associated with the LHS 1281 // implicit variable. 1282 if (IsArray) { 1283 OriginalAddr = Builder.CreateElementBitCast( 1284 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin"); 1285 } 1286 PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; }); 1287 PrivateScope.addPrivate( 1288 RHSVD, [this, PrivateVD, RHSVD, IsArray]() { 1289 return IsArray 1290 ? Builder.CreateElementBitCast( 1291 GetAddrOfLocalVar(PrivateVD), 1292 ConvertTypeForMem(RHSVD->getType()), "rhs.begin") 1293 : GetAddrOfLocalVar(PrivateVD); 1294 }); 1295 } 1296 ++ILHS; 1297 ++IRHS; 1298 ++IPriv; 1299 ++Count; 1300 } 1301 if (!Data.ReductionVars.empty()) { 1302 Data.IsReductionWithTaskMod = true; 1303 Data.IsWorksharingReduction = 1304 isOpenMPWorksharingDirective(D.getDirectiveKind()); 1305 llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit( 1306 *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data); 1307 const Expr *TaskRedRef = nullptr; 1308 switch (D.getDirectiveKind()) { 1309 case OMPD_parallel: 1310 TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr(); 1311 break; 1312 case OMPD_for: 1313 TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr(); 1314 break; 1315 case OMPD_sections: 1316 TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr(); 1317 break; 1318 case OMPD_parallel_for: 1319 TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr(); 1320 break; 1321 case OMPD_parallel_master: 1322 TaskRedRef = 1323 cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr(); 1324 break; 1325 case OMPD_parallel_sections: 1326 TaskRedRef = 1327 cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr(); 1328 break; 1329 case OMPD_target_parallel: 1330 TaskRedRef = 1331 cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr(); 1332 break; 1333 case OMPD_target_parallel_for: 1334 TaskRedRef = 1335 cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr(); 1336 break; 1337 case OMPD_distribute_parallel_for: 1338 TaskRedRef = 1339 cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr(); 1340 break; 1341 case OMPD_teams_distribute_parallel_for: 1342 TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D) 1343 .getTaskReductionRefExpr(); 1344 break; 1345 case OMPD_target_teams_distribute_parallel_for: 1346 TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D) 1347 .getTaskReductionRefExpr(); 1348 break; 1349 case OMPD_simd: 1350 case OMPD_for_simd: 1351 case OMPD_section: 1352 case OMPD_single: 1353 case OMPD_master: 1354 case OMPD_critical: 1355 case OMPD_parallel_for_simd: 1356 case OMPD_task: 1357 case OMPD_taskyield: 1358 case OMPD_barrier: 1359 case OMPD_taskwait: 1360 case OMPD_taskgroup: 1361 case OMPD_flush: 1362 case OMPD_depobj: 1363 case OMPD_scan: 1364 case OMPD_ordered: 1365 case OMPD_atomic: 1366 case OMPD_teams: 1367 case OMPD_target: 1368 case OMPD_cancellation_point: 1369 case OMPD_cancel: 1370 case OMPD_target_data: 1371 case OMPD_target_enter_data: 1372 case OMPD_target_exit_data: 1373 case OMPD_taskloop: 1374 case OMPD_taskloop_simd: 1375 case OMPD_master_taskloop: 1376 case OMPD_master_taskloop_simd: 1377 case OMPD_parallel_master_taskloop: 1378 case OMPD_parallel_master_taskloop_simd: 1379 case OMPD_distribute: 1380 case OMPD_target_update: 1381 case OMPD_distribute_parallel_for_simd: 1382 case OMPD_distribute_simd: 1383 case OMPD_target_parallel_for_simd: 1384 case OMPD_target_simd: 1385 case OMPD_teams_distribute: 1386 case OMPD_teams_distribute_simd: 1387 case OMPD_teams_distribute_parallel_for_simd: 1388 case OMPD_target_teams: 1389 case OMPD_target_teams_distribute: 1390 case OMPD_target_teams_distribute_parallel_for_simd: 1391 case OMPD_target_teams_distribute_simd: 1392 case OMPD_declare_target: 1393 case OMPD_end_declare_target: 1394 case OMPD_threadprivate: 1395 case OMPD_allocate: 1396 case OMPD_declare_reduction: 1397 case OMPD_declare_mapper: 1398 case OMPD_declare_simd: 1399 case OMPD_requires: 1400 case OMPD_declare_variant: 1401 case OMPD_begin_declare_variant: 1402 case OMPD_end_declare_variant: 1403 case OMPD_unknown: 1404 default: 1405 llvm_unreachable("Enexpected directive with task reductions."); 1406 } 1407 1408 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl()); 1409 EmitVarDecl(*VD); 1410 EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD), 1411 /*Volatile=*/false, TaskRedRef->getType()); 1412 } 1413 } 1414 1415 void CodeGenFunction::EmitOMPReductionClauseFinal( 1416 const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) { 1417 if (!HaveInsertPoint()) 1418 return; 1419 llvm::SmallVector<const Expr *, 8> Privates; 1420 llvm::SmallVector<const Expr *, 8> LHSExprs; 1421 llvm::SmallVector<const Expr *, 8> RHSExprs; 1422 llvm::SmallVector<const Expr *, 8> ReductionOps; 1423 bool HasAtLeastOneReduction = false; 1424 bool IsReductionWithTaskMod = false; 1425 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) { 1426 // Do not emit for inscan reductions. 1427 if (C->getModifier() == OMPC_REDUCTION_inscan) 1428 continue; 1429 HasAtLeastOneReduction = true; 1430 Privates.append(C->privates().begin(), C->privates().end()); 1431 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 1432 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 1433 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 1434 IsReductionWithTaskMod = 1435 IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task; 1436 } 1437 if (HasAtLeastOneReduction) { 1438 if (IsReductionWithTaskMod) { 1439 CGM.getOpenMPRuntime().emitTaskReductionFini( 1440 *this, D.getBeginLoc(), 1441 isOpenMPWorksharingDirective(D.getDirectiveKind())); 1442 } 1443 bool WithNowait = D.getSingleClause<OMPNowaitClause>() || 1444 isOpenMPParallelDirective(D.getDirectiveKind()) || 1445 ReductionKind == OMPD_simd; 1446 bool SimpleReduction = ReductionKind == OMPD_simd; 1447 // Emit nowait reduction if nowait clause is present or directive is a 1448 // parallel directive (it always has implicit barrier). 1449 CGM.getOpenMPRuntime().emitReduction( 1450 *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps, 1451 {WithNowait, SimpleReduction, ReductionKind}); 1452 } 1453 } 1454 1455 static void emitPostUpdateForReductionClause( 1456 CodeGenFunction &CGF, const OMPExecutableDirective &D, 1457 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1458 if (!CGF.HaveInsertPoint()) 1459 return; 1460 llvm::BasicBlock *DoneBB = nullptr; 1461 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) { 1462 if (const Expr *PostUpdate = C->getPostUpdateExpr()) { 1463 if (!DoneBB) { 1464 if (llvm::Value *Cond = CondGen(CGF)) { 1465 // If the first post-update expression is found, emit conditional 1466 // block if it was requested. 1467 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu"); 1468 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done"); 1469 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1470 CGF.EmitBlock(ThenBB); 1471 } 1472 } 1473 CGF.EmitIgnoredExpr(PostUpdate); 1474 } 1475 } 1476 if (DoneBB) 1477 CGF.EmitBlock(DoneBB, /*IsFinished=*/true); 1478 } 1479 1480 namespace { 1481 /// Codegen lambda for appending distribute lower and upper bounds to outlined 1482 /// parallel function. This is necessary for combined constructs such as 1483 /// 'distribute parallel for' 1484 typedef llvm::function_ref<void(CodeGenFunction &, 1485 const OMPExecutableDirective &, 1486 llvm::SmallVectorImpl<llvm::Value *> &)> 1487 CodeGenBoundParametersTy; 1488 } // anonymous namespace 1489 1490 static void 1491 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF, 1492 const OMPExecutableDirective &S) { 1493 if (CGF.getLangOpts().OpenMP < 50) 1494 return; 1495 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls; 1496 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 1497 for (const Expr *Ref : C->varlists()) { 1498 if (!Ref->getType()->isScalarType()) 1499 continue; 1500 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1501 if (!DRE) 1502 continue; 1503 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1504 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1505 } 1506 } 1507 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 1508 for (const Expr *Ref : C->varlists()) { 1509 if (!Ref->getType()->isScalarType()) 1510 continue; 1511 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1512 if (!DRE) 1513 continue; 1514 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1515 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1516 } 1517 } 1518 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) { 1519 for (const Expr *Ref : C->varlists()) { 1520 if (!Ref->getType()->isScalarType()) 1521 continue; 1522 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1523 if (!DRE) 1524 continue; 1525 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1526 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1527 } 1528 } 1529 // Privates should ne analyzed since they are not captured at all. 1530 // Task reductions may be skipped - tasks are ignored. 1531 // Firstprivates do not return value but may be passed by reference - no need 1532 // to check for updated lastprivate conditional. 1533 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1534 for (const Expr *Ref : C->varlists()) { 1535 if (!Ref->getType()->isScalarType()) 1536 continue; 1537 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1538 if (!DRE) 1539 continue; 1540 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1541 } 1542 } 1543 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1544 CGF, S, PrivateDecls); 1545 } 1546 1547 static void emitCommonOMPParallelDirective( 1548 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1549 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1550 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1551 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1552 llvm::Function *OutlinedFn = 1553 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1554 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1555 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1556 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1557 llvm::Value *NumThreads = 1558 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1559 /*IgnoreResultAssign=*/true); 1560 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1561 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1562 } 1563 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1564 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1565 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1566 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1567 } 1568 const Expr *IfCond = nullptr; 1569 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1570 if (C->getNameModifier() == OMPD_unknown || 1571 C->getNameModifier() == OMPD_parallel) { 1572 IfCond = C->getCondition(); 1573 break; 1574 } 1575 } 1576 1577 OMPParallelScope Scope(CGF, S); 1578 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1579 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk 1580 // lower and upper bounds with the pragma 'for' chunking mechanism. 1581 // The following lambda takes care of appending the lower and upper bound 1582 // parameters when necessary 1583 CodeGenBoundParameters(CGF, S, CapturedVars); 1584 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1585 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1586 CapturedVars, IfCond); 1587 } 1588 1589 static bool isAllocatableDecl(const VarDecl *VD) { 1590 const VarDecl *CVD = VD->getCanonicalDecl(); 1591 if (!CVD->hasAttr<OMPAllocateDeclAttr>()) 1592 return false; 1593 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>(); 1594 // Use the default allocation. 1595 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc || 1596 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) && 1597 !AA->getAllocator()); 1598 } 1599 1600 static void emitEmptyBoundParameters(CodeGenFunction &, 1601 const OMPExecutableDirective &, 1602 llvm::SmallVectorImpl<llvm::Value *> &) {} 1603 1604 Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable( 1605 CodeGenFunction &CGF, const VarDecl *VD) { 1606 CodeGenModule &CGM = CGF.CGM; 1607 auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 1608 1609 if (!VD) 1610 return Address::invalid(); 1611 const VarDecl *CVD = VD->getCanonicalDecl(); 1612 if (!isAllocatableDecl(CVD)) 1613 return Address::invalid(); 1614 llvm::Value *Size; 1615 CharUnits Align = CGM.getContext().getDeclAlign(CVD); 1616 if (CVD->getType()->isVariablyModifiedType()) { 1617 Size = CGF.getTypeSize(CVD->getType()); 1618 // Align the size: ((size + align - 1) / align) * align 1619 Size = CGF.Builder.CreateNUWAdd( 1620 Size, CGM.getSize(Align - CharUnits::fromQuantity(1))); 1621 Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align)); 1622 Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align)); 1623 } else { 1624 CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType()); 1625 Size = CGM.getSize(Sz.alignTo(Align)); 1626 } 1627 1628 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>(); 1629 assert(AA->getAllocator() && 1630 "Expected allocator expression for non-default allocator."); 1631 llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator()); 1632 // According to the standard, the original allocator type is a enum (integer). 1633 // Convert to pointer type, if required. 1634 if (Allocator->getType()->isIntegerTy()) 1635 Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy); 1636 else if (Allocator->getType()->isPointerTy()) 1637 Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator, 1638 CGM.VoidPtrTy); 1639 1640 llvm::Value *Addr = OMPBuilder.createOMPAlloc( 1641 CGF.Builder, Size, Allocator, 1642 getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", ".")); 1643 llvm::CallInst *FreeCI = 1644 OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator); 1645 1646 CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI); 1647 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 1648 Addr, 1649 CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())), 1650 getNameWithSeparators({CVD->getName(), ".addr"}, ".", ".")); 1651 return Address(Addr, Align); 1652 } 1653 1654 Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate( 1655 CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, 1656 SourceLocation Loc) { 1657 CodeGenModule &CGM = CGF.CGM; 1658 if (CGM.getLangOpts().OpenMPUseTLS && 1659 CGM.getContext().getTargetInfo().isTLSSupported()) 1660 return VDAddr; 1661 1662 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 1663 1664 llvm::Type *VarTy = VDAddr.getElementType(); 1665 llvm::Value *Data = 1666 CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy); 1667 llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)); 1668 std::string Suffix = getNameWithSeparators({"cache", ""}); 1669 llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix); 1670 1671 llvm::CallInst *ThreadPrivateCacheCall = 1672 OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName); 1673 1674 return Address(ThreadPrivateCacheCall, VDAddr.getAlignment()); 1675 } 1676 1677 std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators( 1678 ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) { 1679 SmallString<128> Buffer; 1680 llvm::raw_svector_ostream OS(Buffer); 1681 StringRef Sep = FirstSeparator; 1682 for (StringRef Part : Parts) { 1683 OS << Sep << Part; 1684 Sep = Separator; 1685 } 1686 return OS.str().str(); 1687 } 1688 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 1689 if (CGM.getLangOpts().OpenMPIRBuilder) { 1690 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 1691 // Check if we have any if clause associated with the directive. 1692 llvm::Value *IfCond = nullptr; 1693 if (const auto *C = S.getSingleClause<OMPIfClause>()) 1694 IfCond = EmitScalarExpr(C->getCondition(), 1695 /*IgnoreResultAssign=*/true); 1696 1697 llvm::Value *NumThreads = nullptr; 1698 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) 1699 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(), 1700 /*IgnoreResultAssign=*/true); 1701 1702 ProcBindKind ProcBind = OMP_PROC_BIND_default; 1703 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) 1704 ProcBind = ProcBindClause->getProcBindKind(); 1705 1706 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 1707 1708 // The cleanup callback that finalizes all variabels at the given location, 1709 // thus calls destructors etc. 1710 auto FiniCB = [this](InsertPointTy IP) { 1711 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 1712 }; 1713 1714 // Privatization callback that performs appropriate action for 1715 // shared/private/firstprivate/lastprivate/copyin/... variables. 1716 // 1717 // TODO: This defaults to shared right now. 1718 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1719 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) { 1720 // The next line is appropriate only for variables (Val) with the 1721 // data-sharing attribute "shared". 1722 ReplVal = &Val; 1723 1724 return CodeGenIP; 1725 }; 1726 1727 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1728 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt(); 1729 1730 auto BodyGenCB = [ParallelRegionBodyStmt, 1731 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1732 llvm::BasicBlock &ContinuationBB) { 1733 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP, 1734 ContinuationBB); 1735 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt, 1736 CodeGenIP, ContinuationBB); 1737 }; 1738 1739 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 1740 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 1741 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 1742 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 1743 Builder.restoreIP( 1744 OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB, 1745 IfCond, NumThreads, ProcBind, S.hasCancel())); 1746 return; 1747 } 1748 1749 // Emit parallel region as a standalone region. 1750 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 1751 Action.Enter(CGF); 1752 OMPPrivateScope PrivateScope(CGF); 1753 bool Copyins = CGF.EmitOMPCopyinClause(S); 1754 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 1755 if (Copyins) { 1756 // Emit implicit barrier to synchronize threads and avoid data races on 1757 // propagation master's thread values of threadprivate variables to local 1758 // instances of that variables of all other implicit threads. 1759 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 1760 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 1761 /*ForceSimpleCall=*/true); 1762 } 1763 CGF.EmitOMPPrivateClause(S, PrivateScope); 1764 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 1765 (void)PrivateScope.Privatize(); 1766 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt()); 1767 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 1768 }; 1769 { 1770 auto LPCRegion = 1771 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 1772 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen, 1773 emitEmptyBoundParameters); 1774 emitPostUpdateForReductionClause(*this, S, 1775 [](CodeGenFunction &) { return nullptr; }); 1776 } 1777 // Check for outer lastprivate conditional update. 1778 checkForLastprivateConditionalUpdate(*this, S); 1779 } 1780 1781 namespace { 1782 /// RAII to handle scopes for loop transformation directives. 1783 class OMPTransformDirectiveScopeRAII { 1784 OMPLoopScope *Scope = nullptr; 1785 CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr; 1786 CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr; 1787 1788 public: 1789 OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) { 1790 if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) { 1791 Scope = new OMPLoopScope(CGF, *Dir); 1792 CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP); 1793 CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI); 1794 } 1795 } 1796 ~OMPTransformDirectiveScopeRAII() { 1797 if (!Scope) 1798 return; 1799 delete CapInfoRAII; 1800 delete CGSI; 1801 delete Scope; 1802 } 1803 }; 1804 } // namespace 1805 1806 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, 1807 int MaxLevel, int Level = 0) { 1808 assert(Level < MaxLevel && "Too deep lookup during loop body codegen."); 1809 const Stmt *SimplifiedS = S->IgnoreContainers(); 1810 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) { 1811 PrettyStackTraceLoc CrashInfo( 1812 CGF.getContext().getSourceManager(), CS->getLBracLoc(), 1813 "LLVM IR generation of compound statement ('{}')"); 1814 1815 // Keep track of the current cleanup stack depth, including debug scopes. 1816 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1817 for (const Stmt *CurStmt : CS->body()) 1818 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1819 return; 1820 } 1821 if (SimplifiedS == NextLoop) { 1822 if (auto *Dir = dyn_cast<OMPTileDirective>(SimplifiedS)) 1823 SimplifiedS = Dir->getTransformedStmt(); 1824 if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS)) 1825 SimplifiedS = CanonLoop->getLoopStmt(); 1826 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1827 S = For->getBody(); 1828 } else { 1829 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1830 "Expected canonical for loop or range-based for loop."); 1831 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1832 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1833 S = CXXFor->getBody(); 1834 } 1835 if (Level + 1 < MaxLevel) { 1836 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1837 S, /*TryImperfectlyNestedLoops=*/true); 1838 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1839 return; 1840 } 1841 } 1842 CGF.EmitStmt(S); 1843 } 1844 1845 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1846 JumpDest LoopExit) { 1847 RunCleanupsScope BodyScope(*this); 1848 // Update counters values on current iteration. 1849 for (const Expr *UE : D.updates()) 1850 EmitIgnoredExpr(UE); 1851 // Update the linear variables. 1852 // In distribute directives only loop counters may be marked as linear, no 1853 // need to generate the code for them. 1854 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1855 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1856 for (const Expr *UE : C->updates()) 1857 EmitIgnoredExpr(UE); 1858 } 1859 } 1860 1861 // On a continue in the body, jump to the end. 1862 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1863 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1864 for (const Expr *E : D.finals_conditions()) { 1865 if (!E) 1866 continue; 1867 // Check that loop counter in non-rectangular nest fits into the iteration 1868 // space. 1869 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1870 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1871 getProfileCount(D.getBody())); 1872 EmitBlock(NextBB); 1873 } 1874 1875 OMPPrivateScope InscanScope(*this); 1876 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true); 1877 bool IsInscanRegion = InscanScope.Privatize(); 1878 if (IsInscanRegion) { 1879 // Need to remember the block before and after scan directive 1880 // to dispatch them correctly depending on the clause used in 1881 // this directive, inclusive or exclusive. For inclusive scan the natural 1882 // order of the blocks is used, for exclusive clause the blocks must be 1883 // executed in reverse order. 1884 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb"); 1885 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb"); 1886 // No need to allocate inscan exit block, in simd mode it is selected in the 1887 // codegen for the scan directive. 1888 if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd) 1889 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1890 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1891 EmitBranch(OMPScanDispatch); 1892 EmitBlock(OMPBeforeScanBlock); 1893 } 1894 1895 // Emit loop variables for C++ range loops. 1896 const Stmt *Body = 1897 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1898 // Emit loop body. 1899 emitBody(*this, Body, 1900 OMPLoopBasedDirective::tryToFindNextInnerLoop( 1901 Body, /*TryImperfectlyNestedLoops=*/true), 1902 D.getLoopsNumber()); 1903 1904 // Jump to the dispatcher at the end of the loop body. 1905 if (IsInscanRegion) 1906 EmitBranch(OMPScanExitBlock); 1907 1908 // The end (updates/cleanups). 1909 EmitBlock(Continue.getBlock()); 1910 BreakContinueStack.pop_back(); 1911 } 1912 1913 using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>; 1914 1915 /// Emit a captured statement and return the function as well as its captured 1916 /// closure context. 1917 static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF, 1918 const CapturedStmt *S) { 1919 LValue CapStruct = ParentCGF.InitCapturedStruct(*S); 1920 CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true); 1921 std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI = 1922 std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S); 1923 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get()); 1924 llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S); 1925 1926 return {F, CapStruct.getPointer(ParentCGF)}; 1927 } 1928 1929 /// Emit a call to a previously captured closure. 1930 static llvm::CallInst * 1931 emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap, 1932 llvm::ArrayRef<llvm::Value *> Args) { 1933 // Append the closure context to the argument. 1934 SmallVector<llvm::Value *> EffectiveArgs; 1935 EffectiveArgs.reserve(Args.size() + 1); 1936 llvm::append_range(EffectiveArgs, Args); 1937 EffectiveArgs.push_back(Cap.second); 1938 1939 return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs); 1940 } 1941 1942 llvm::CanonicalLoopInfo * 1943 CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) { 1944 assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented"); 1945 1946 EmitStmt(S); 1947 assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops"); 1948 1949 // The last added loop is the outermost one. 1950 return OMPLoopNestStack.back(); 1951 } 1952 1953 void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) { 1954 const Stmt *SyntacticalLoop = S->getLoopStmt(); 1955 if (!getLangOpts().OpenMPIRBuilder) { 1956 // Ignore if OpenMPIRBuilder is not enabled. 1957 EmitStmt(SyntacticalLoop); 1958 return; 1959 } 1960 1961 LexicalScope ForScope(*this, S->getSourceRange()); 1962 1963 // Emit init statements. The Distance/LoopVar funcs may reference variable 1964 // declarations they contain. 1965 const Stmt *BodyStmt; 1966 if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) { 1967 if (const Stmt *InitStmt = For->getInit()) 1968 EmitStmt(InitStmt); 1969 BodyStmt = For->getBody(); 1970 } else if (const auto *RangeFor = 1971 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) { 1972 if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt()) 1973 EmitStmt(RangeStmt); 1974 if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt()) 1975 EmitStmt(BeginStmt); 1976 if (const DeclStmt *EndStmt = RangeFor->getEndStmt()) 1977 EmitStmt(EndStmt); 1978 if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt()) 1979 EmitStmt(LoopVarStmt); 1980 BodyStmt = RangeFor->getBody(); 1981 } else 1982 llvm_unreachable("Expected for-stmt or range-based for-stmt"); 1983 1984 // Emit closure for later use. By-value captures will be captured here. 1985 const CapturedStmt *DistanceFunc = S->getDistanceFunc(); 1986 EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc); 1987 const CapturedStmt *LoopVarFunc = S->getLoopVarFunc(); 1988 EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc); 1989 1990 // Call the distance function to get the number of iterations of the loop to 1991 // come. 1992 QualType LogicalTy = DistanceFunc->getCapturedDecl() 1993 ->getParam(0) 1994 ->getType() 1995 .getNonReferenceType(); 1996 Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr"); 1997 emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()}); 1998 llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count"); 1999 2000 // Emit the loop structure. 2001 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 2002 auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP, 2003 llvm::Value *IndVar) { 2004 Builder.restoreIP(CodeGenIP); 2005 2006 // Emit the loop body: Convert the logical iteration number to the loop 2007 // variable and emit the body. 2008 const DeclRefExpr *LoopVarRef = S->getLoopVarRef(); 2009 LValue LCVal = EmitLValue(LoopVarRef); 2010 Address LoopVarAddress = LCVal.getAddress(*this); 2011 emitCapturedStmtCall(*this, LoopVarClosure, 2012 {LoopVarAddress.getPointer(), IndVar}); 2013 2014 RunCleanupsScope BodyScope(*this); 2015 EmitStmt(BodyStmt); 2016 }; 2017 llvm::CanonicalLoopInfo *CL = 2018 OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal); 2019 2020 // Finish up the loop. 2021 Builder.restoreIP(CL->getAfterIP()); 2022 ForScope.ForceCleanup(); 2023 2024 // Remember the CanonicalLoopInfo for parent AST nodes consuming it. 2025 OMPLoopNestStack.push_back(CL); 2026 } 2027 2028 void CodeGenFunction::EmitOMPInnerLoop( 2029 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 2030 const Expr *IncExpr, 2031 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 2032 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 2033 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 2034 2035 // Start the loop with a block that tests the condition. 2036 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 2037 EmitBlock(CondBlock); 2038 const SourceRange R = S.getSourceRange(); 2039 2040 // If attributes are attached, push to the basic block with them. 2041 const auto &OMPED = cast<OMPExecutableDirective>(S); 2042 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 2043 const Stmt *SS = ICS->getCapturedStmt(); 2044 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 2045 OMPLoopNestStack.clear(); 2046 if (AS) 2047 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 2048 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 2049 SourceLocToDebugLoc(R.getEnd())); 2050 else 2051 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2052 SourceLocToDebugLoc(R.getEnd())); 2053 2054 // If there are any cleanups between here and the loop-exit scope, 2055 // create a block to stage a loop exit along. 2056 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2057 if (RequiresCleanup) 2058 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 2059 2060 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 2061 2062 // Emit condition. 2063 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 2064 if (ExitBlock != LoopExit.getBlock()) { 2065 EmitBlock(ExitBlock); 2066 EmitBranchThroughCleanup(LoopExit); 2067 } 2068 2069 EmitBlock(LoopBody); 2070 incrementProfileCounter(&S); 2071 2072 // Create a block for the increment. 2073 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 2074 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2075 2076 BodyGen(*this); 2077 2078 // Emit "IV = IV + 1" and a back-edge to the condition block. 2079 EmitBlock(Continue.getBlock()); 2080 EmitIgnoredExpr(IncExpr); 2081 PostIncGen(*this); 2082 BreakContinueStack.pop_back(); 2083 EmitBranch(CondBlock); 2084 LoopStack.pop(); 2085 // Emit the fall-through block. 2086 EmitBlock(LoopExit.getBlock()); 2087 } 2088 2089 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 2090 if (!HaveInsertPoint()) 2091 return false; 2092 // Emit inits for the linear variables. 2093 bool HasLinears = false; 2094 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2095 for (const Expr *Init : C->inits()) { 2096 HasLinears = true; 2097 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 2098 if (const auto *Ref = 2099 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 2100 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 2101 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 2102 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 2103 CapturedStmtInfo->lookup(OrigVD) != nullptr, 2104 VD->getInit()->getType(), VK_LValue, 2105 VD->getInit()->getExprLoc()); 2106 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 2107 VD->getType()), 2108 /*capturedByInit=*/false); 2109 EmitAutoVarCleanups(Emission); 2110 } else { 2111 EmitVarDecl(*VD); 2112 } 2113 } 2114 // Emit the linear steps for the linear clauses. 2115 // If a step is not constant, it is pre-calculated before the loop. 2116 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 2117 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 2118 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 2119 // Emit calculation of the linear step. 2120 EmitIgnoredExpr(CS); 2121 } 2122 } 2123 return HasLinears; 2124 } 2125 2126 void CodeGenFunction::EmitOMPLinearClauseFinal( 2127 const OMPLoopDirective &D, 2128 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2129 if (!HaveInsertPoint()) 2130 return; 2131 llvm::BasicBlock *DoneBB = nullptr; 2132 // Emit the final values of the linear variables. 2133 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2134 auto IC = C->varlist_begin(); 2135 for (const Expr *F : C->finals()) { 2136 if (!DoneBB) { 2137 if (llvm::Value *Cond = CondGen(*this)) { 2138 // If the first post-update expression is found, emit conditional 2139 // block if it was requested. 2140 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 2141 DoneBB = createBasicBlock(".omp.linear.pu.done"); 2142 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2143 EmitBlock(ThenBB); 2144 } 2145 } 2146 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 2147 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 2148 CapturedStmtInfo->lookup(OrigVD) != nullptr, 2149 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 2150 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 2151 CodeGenFunction::OMPPrivateScope VarScope(*this); 2152 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2153 (void)VarScope.Privatize(); 2154 EmitIgnoredExpr(F); 2155 ++IC; 2156 } 2157 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 2158 EmitIgnoredExpr(PostUpdate); 2159 } 2160 if (DoneBB) 2161 EmitBlock(DoneBB, /*IsFinished=*/true); 2162 } 2163 2164 static void emitAlignedClause(CodeGenFunction &CGF, 2165 const OMPExecutableDirective &D) { 2166 if (!CGF.HaveInsertPoint()) 2167 return; 2168 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 2169 llvm::APInt ClauseAlignment(64, 0); 2170 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 2171 auto *AlignmentCI = 2172 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 2173 ClauseAlignment = AlignmentCI->getValue(); 2174 } 2175 for (const Expr *E : Clause->varlists()) { 2176 llvm::APInt Alignment(ClauseAlignment); 2177 if (Alignment == 0) { 2178 // OpenMP [2.8.1, Description] 2179 // If no optional parameter is specified, implementation-defined default 2180 // alignments for SIMD instructions on the target platforms are assumed. 2181 Alignment = 2182 CGF.getContext() 2183 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 2184 E->getType()->getPointeeType())) 2185 .getQuantity(); 2186 } 2187 assert((Alignment == 0 || Alignment.isPowerOf2()) && 2188 "alignment is not power of 2"); 2189 if (Alignment != 0) { 2190 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 2191 CGF.emitAlignmentAssumption( 2192 PtrValue, E, /*No second loc needed*/ SourceLocation(), 2193 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 2194 } 2195 } 2196 } 2197 } 2198 2199 void CodeGenFunction::EmitOMPPrivateLoopCounters( 2200 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 2201 if (!HaveInsertPoint()) 2202 return; 2203 auto I = S.private_counters().begin(); 2204 for (const Expr *E : S.counters()) { 2205 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2206 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 2207 // Emit var without initialization. 2208 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 2209 EmitAutoVarCleanups(VarEmission); 2210 LocalDeclMap.erase(PrivateVD); 2211 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 2212 return VarEmission.getAllocatedAddress(); 2213 }); 2214 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 2215 VD->hasGlobalStorage()) { 2216 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 2217 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 2218 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 2219 E->getType(), VK_LValue, E->getExprLoc()); 2220 return EmitLValue(&DRE).getAddress(*this); 2221 }); 2222 } else { 2223 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 2224 return VarEmission.getAllocatedAddress(); 2225 }); 2226 } 2227 ++I; 2228 } 2229 // Privatize extra loop counters used in loops for ordered(n) clauses. 2230 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 2231 if (!C->getNumForLoops()) 2232 continue; 2233 for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size(); 2234 I < E; ++I) { 2235 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 2236 const auto *VD = cast<VarDecl>(DRE->getDecl()); 2237 // Override only those variables that can be captured to avoid re-emission 2238 // of the variables declared within the loops. 2239 if (DRE->refersToEnclosingVariableOrCapture()) { 2240 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 2241 return CreateMemTemp(DRE->getType(), VD->getName()); 2242 }); 2243 } 2244 } 2245 } 2246 } 2247 2248 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 2249 const Expr *Cond, llvm::BasicBlock *TrueBlock, 2250 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 2251 if (!CGF.HaveInsertPoint()) 2252 return; 2253 { 2254 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 2255 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 2256 (void)PreCondScope.Privatize(); 2257 // Get initial values of real counters. 2258 for (const Expr *I : S.inits()) { 2259 CGF.EmitIgnoredExpr(I); 2260 } 2261 } 2262 // Create temp loop control variables with their init values to support 2263 // non-rectangular loops. 2264 CodeGenFunction::OMPMapVars PreCondVars; 2265 for (const Expr * E: S.dependent_counters()) { 2266 if (!E) 2267 continue; 2268 assert(!E->getType().getNonReferenceType()->isRecordType() && 2269 "dependent counter must not be an iterator."); 2270 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2271 Address CounterAddr = 2272 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2273 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2274 } 2275 (void)PreCondVars.apply(CGF); 2276 for (const Expr *E : S.dependent_inits()) { 2277 if (!E) 2278 continue; 2279 CGF.EmitIgnoredExpr(E); 2280 } 2281 // Check that loop is executed at least one time. 2282 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2283 PreCondVars.restore(CGF); 2284 } 2285 2286 void CodeGenFunction::EmitOMPLinearClause( 2287 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2288 if (!HaveInsertPoint()) 2289 return; 2290 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2291 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2292 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2293 for (const Expr *C : LoopDirective->counters()) { 2294 SIMDLCVs.insert( 2295 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2296 } 2297 } 2298 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2299 auto CurPrivate = C->privates().begin(); 2300 for (const Expr *E : C->varlists()) { 2301 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2302 const auto *PrivateVD = 2303 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2304 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2305 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 2306 // Emit private VarDecl with copy init. 2307 EmitVarDecl(*PrivateVD); 2308 return GetAddrOfLocalVar(PrivateVD); 2309 }); 2310 assert(IsRegistered && "linear var already registered as private"); 2311 // Silence the warning about unused variable. 2312 (void)IsRegistered; 2313 } else { 2314 EmitVarDecl(*PrivateVD); 2315 } 2316 ++CurPrivate; 2317 } 2318 } 2319 } 2320 2321 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2322 const OMPExecutableDirective &D, 2323 bool IsMonotonic) { 2324 if (!CGF.HaveInsertPoint()) 2325 return; 2326 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2327 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2328 /*ignoreResult=*/true); 2329 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2330 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2331 // In presence of finite 'safelen', it may be unsafe to mark all 2332 // the memory instructions parallel, because loop-carried 2333 // dependences of 'safelen' iterations are possible. 2334 if (!IsMonotonic) 2335 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2336 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2337 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2338 /*ignoreResult=*/true); 2339 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2340 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2341 // In presence of finite 'safelen', it may be unsafe to mark all 2342 // the memory instructions parallel, because loop-carried 2343 // dependences of 'safelen' iterations are possible. 2344 CGF.LoopStack.setParallel(/*Enable=*/false); 2345 } 2346 } 2347 2348 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 2349 bool IsMonotonic) { 2350 // Walk clauses and process safelen/lastprivate. 2351 LoopStack.setParallel(!IsMonotonic); 2352 LoopStack.setVectorizeEnable(); 2353 emitSimdlenSafelenClause(*this, D, IsMonotonic); 2354 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2355 if (C->getKind() == OMPC_ORDER_concurrent) 2356 LoopStack.setParallel(/*Enable=*/true); 2357 if ((D.getDirectiveKind() == OMPD_simd || 2358 (getLangOpts().OpenMPSimd && 2359 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2360 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2361 [](const OMPReductionClause *C) { 2362 return C->getModifier() == OMPC_REDUCTION_inscan; 2363 })) 2364 // Disable parallel access in case of prefix sum. 2365 LoopStack.setParallel(/*Enable=*/false); 2366 } 2367 2368 void CodeGenFunction::EmitOMPSimdFinal( 2369 const OMPLoopDirective &D, 2370 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2371 if (!HaveInsertPoint()) 2372 return; 2373 llvm::BasicBlock *DoneBB = nullptr; 2374 auto IC = D.counters().begin(); 2375 auto IPC = D.private_counters().begin(); 2376 for (const Expr *F : D.finals()) { 2377 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 2378 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 2379 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 2380 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 2381 OrigVD->hasGlobalStorage() || CED) { 2382 if (!DoneBB) { 2383 if (llvm::Value *Cond = CondGen(*this)) { 2384 // If the first post-update expression is found, emit conditional 2385 // block if it was requested. 2386 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 2387 DoneBB = createBasicBlock(".omp.final.done"); 2388 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2389 EmitBlock(ThenBB); 2390 } 2391 } 2392 Address OrigAddr = Address::invalid(); 2393 if (CED) { 2394 OrigAddr = 2395 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 2396 } else { 2397 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 2398 /*RefersToEnclosingVariableOrCapture=*/false, 2399 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 2400 OrigAddr = EmitLValue(&DRE).getAddress(*this); 2401 } 2402 OMPPrivateScope VarScope(*this); 2403 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2404 (void)VarScope.Privatize(); 2405 EmitIgnoredExpr(F); 2406 } 2407 ++IC; 2408 ++IPC; 2409 } 2410 if (DoneBB) 2411 EmitBlock(DoneBB, /*IsFinished=*/true); 2412 } 2413 2414 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 2415 const OMPLoopDirective &S, 2416 CodeGenFunction::JumpDest LoopExit) { 2417 CGF.EmitOMPLoopBody(S, LoopExit); 2418 CGF.EmitStopPoint(&S); 2419 } 2420 2421 /// Emit a helper variable and return corresponding lvalue. 2422 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2423 const DeclRefExpr *Helper) { 2424 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2425 CGF.EmitVarDecl(*VDecl); 2426 return CGF.EmitLValue(Helper); 2427 } 2428 2429 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2430 const RegionCodeGenTy &SimdInitGen, 2431 const RegionCodeGenTy &BodyCodeGen) { 2432 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2433 PrePostActionTy &) { 2434 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2435 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2436 SimdInitGen(CGF); 2437 2438 BodyCodeGen(CGF); 2439 }; 2440 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2441 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2442 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2443 2444 BodyCodeGen(CGF); 2445 }; 2446 const Expr *IfCond = nullptr; 2447 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2448 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2449 if (CGF.getLangOpts().OpenMP >= 50 && 2450 (C->getNameModifier() == OMPD_unknown || 2451 C->getNameModifier() == OMPD_simd)) { 2452 IfCond = C->getCondition(); 2453 break; 2454 } 2455 } 2456 } 2457 if (IfCond) { 2458 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2459 } else { 2460 RegionCodeGenTy ThenRCG(ThenGen); 2461 ThenRCG(CGF); 2462 } 2463 } 2464 2465 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2466 PrePostActionTy &Action) { 2467 Action.Enter(CGF); 2468 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2469 "Expected simd directive"); 2470 OMPLoopScope PreInitScope(CGF, S); 2471 // if (PreCond) { 2472 // for (IV in 0..LastIteration) BODY; 2473 // <Final counter/linear vars updates>; 2474 // } 2475 // 2476 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2477 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2478 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2479 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2480 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2481 } 2482 2483 // Emit: if (PreCond) - begin. 2484 // If the condition constant folds and can be elided, avoid emitting the 2485 // whole loop. 2486 bool CondConstant; 2487 llvm::BasicBlock *ContBlock = nullptr; 2488 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2489 if (!CondConstant) 2490 return; 2491 } else { 2492 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2493 ContBlock = CGF.createBasicBlock("simd.if.end"); 2494 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2495 CGF.getProfileCount(&S)); 2496 CGF.EmitBlock(ThenBlock); 2497 CGF.incrementProfileCounter(&S); 2498 } 2499 2500 // Emit the loop iteration variable. 2501 const Expr *IVExpr = S.getIterationVariable(); 2502 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2503 CGF.EmitVarDecl(*IVDecl); 2504 CGF.EmitIgnoredExpr(S.getInit()); 2505 2506 // Emit the iterations count variable. 2507 // If it is not a variable, Sema decided to calculate iterations count on 2508 // each iteration (e.g., it is foldable into a constant). 2509 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2510 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2511 // Emit calculation of the iterations count. 2512 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2513 } 2514 2515 emitAlignedClause(CGF, S); 2516 (void)CGF.EmitOMPLinearClauseInit(S); 2517 { 2518 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2519 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2520 CGF.EmitOMPLinearClause(S, LoopScope); 2521 CGF.EmitOMPPrivateClause(S, LoopScope); 2522 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2523 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2524 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2525 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2526 (void)LoopScope.Privatize(); 2527 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2528 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2529 2530 emitCommonSimdLoop( 2531 CGF, S, 2532 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2533 CGF.EmitOMPSimdInit(S); 2534 }, 2535 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2536 CGF.EmitOMPInnerLoop( 2537 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2538 [&S](CodeGenFunction &CGF) { 2539 emitOMPLoopBodyWithStopPoint(CGF, S, 2540 CodeGenFunction::JumpDest()); 2541 }, 2542 [](CodeGenFunction &) {}); 2543 }); 2544 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2545 // Emit final copy of the lastprivate variables at the end of loops. 2546 if (HasLastprivateClause) 2547 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2548 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2549 emitPostUpdateForReductionClause(CGF, S, 2550 [](CodeGenFunction &) { return nullptr; }); 2551 } 2552 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2553 // Emit: if (PreCond) - end. 2554 if (ContBlock) { 2555 CGF.EmitBranch(ContBlock); 2556 CGF.EmitBlock(ContBlock, true); 2557 } 2558 } 2559 2560 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2561 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2562 OMPFirstScanLoop = true; 2563 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2564 emitOMPSimdRegion(CGF, S, Action); 2565 }; 2566 { 2567 auto LPCRegion = 2568 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2569 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2570 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2571 } 2572 // Check for outer lastprivate conditional update. 2573 checkForLastprivateConditionalUpdate(*this, S); 2574 } 2575 2576 void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) { 2577 // Emit the de-sugared statement. 2578 OMPTransformDirectiveScopeRAII TileScope(*this, &S); 2579 EmitStmt(S.getTransformedStmt()); 2580 } 2581 2582 void CodeGenFunction::EmitOMPOuterLoop( 2583 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2584 CodeGenFunction::OMPPrivateScope &LoopScope, 2585 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2586 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2587 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2588 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2589 2590 const Expr *IVExpr = S.getIterationVariable(); 2591 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2592 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2593 2594 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2595 2596 // Start the loop with a block that tests the condition. 2597 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2598 EmitBlock(CondBlock); 2599 const SourceRange R = S.getSourceRange(); 2600 OMPLoopNestStack.clear(); 2601 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2602 SourceLocToDebugLoc(R.getEnd())); 2603 2604 llvm::Value *BoolCondVal = nullptr; 2605 if (!DynamicOrOrdered) { 2606 // UB = min(UB, GlobalUB) or 2607 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2608 // 'distribute parallel for') 2609 EmitIgnoredExpr(LoopArgs.EUB); 2610 // IV = LB 2611 EmitIgnoredExpr(LoopArgs.Init); 2612 // IV < UB 2613 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2614 } else { 2615 BoolCondVal = 2616 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2617 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2618 } 2619 2620 // If there are any cleanups between here and the loop-exit scope, 2621 // create a block to stage a loop exit along. 2622 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2623 if (LoopScope.requiresCleanups()) 2624 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2625 2626 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2627 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2628 if (ExitBlock != LoopExit.getBlock()) { 2629 EmitBlock(ExitBlock); 2630 EmitBranchThroughCleanup(LoopExit); 2631 } 2632 EmitBlock(LoopBody); 2633 2634 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2635 // LB for loop condition and emitted it above). 2636 if (DynamicOrOrdered) 2637 EmitIgnoredExpr(LoopArgs.Init); 2638 2639 // Create a block for the increment. 2640 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2641 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2642 2643 emitCommonSimdLoop( 2644 *this, S, 2645 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2646 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2647 // with dynamic/guided scheduling and without ordered clause. 2648 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2649 CGF.LoopStack.setParallel(!IsMonotonic); 2650 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2651 if (C->getKind() == OMPC_ORDER_concurrent) 2652 CGF.LoopStack.setParallel(/*Enable=*/true); 2653 } else { 2654 CGF.EmitOMPSimdInit(S, IsMonotonic); 2655 } 2656 }, 2657 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2658 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2659 SourceLocation Loc = S.getBeginLoc(); 2660 // when 'distribute' is not combined with a 'for': 2661 // while (idx <= UB) { BODY; ++idx; } 2662 // when 'distribute' is combined with a 'for' 2663 // (e.g. 'distribute parallel for') 2664 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2665 CGF.EmitOMPInnerLoop( 2666 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2667 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2668 CodeGenLoop(CGF, S, LoopExit); 2669 }, 2670 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2671 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2672 }); 2673 }); 2674 2675 EmitBlock(Continue.getBlock()); 2676 BreakContinueStack.pop_back(); 2677 if (!DynamicOrOrdered) { 2678 // Emit "LB = LB + Stride", "UB = UB + Stride". 2679 EmitIgnoredExpr(LoopArgs.NextLB); 2680 EmitIgnoredExpr(LoopArgs.NextUB); 2681 } 2682 2683 EmitBranch(CondBlock); 2684 OMPLoopNestStack.clear(); 2685 LoopStack.pop(); 2686 // Emit the fall-through block. 2687 EmitBlock(LoopExit.getBlock()); 2688 2689 // Tell the runtime we are done. 2690 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2691 if (!DynamicOrOrdered) 2692 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2693 S.getDirectiveKind()); 2694 }; 2695 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2696 } 2697 2698 void CodeGenFunction::EmitOMPForOuterLoop( 2699 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2700 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2701 const OMPLoopArguments &LoopArgs, 2702 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2703 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2704 2705 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2706 const bool DynamicOrOrdered = 2707 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2708 2709 assert((Ordered || 2710 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2711 LoopArgs.Chunk != nullptr)) && 2712 "static non-chunked schedule does not need outer loop"); 2713 2714 // Emit outer loop. 2715 // 2716 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2717 // When schedule(dynamic,chunk_size) is specified, the iterations are 2718 // distributed to threads in the team in chunks as the threads request them. 2719 // Each thread executes a chunk of iterations, then requests another chunk, 2720 // until no chunks remain to be distributed. Each chunk contains chunk_size 2721 // iterations, except for the last chunk to be distributed, which may have 2722 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2723 // 2724 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2725 // to threads in the team in chunks as the executing threads request them. 2726 // Each thread executes a chunk of iterations, then requests another chunk, 2727 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2728 // each chunk is proportional to the number of unassigned iterations divided 2729 // by the number of threads in the team, decreasing to 1. For a chunk_size 2730 // with value k (greater than 1), the size of each chunk is determined in the 2731 // same way, with the restriction that the chunks do not contain fewer than k 2732 // iterations (except for the last chunk to be assigned, which may have fewer 2733 // than k iterations). 2734 // 2735 // When schedule(auto) is specified, the decision regarding scheduling is 2736 // delegated to the compiler and/or runtime system. The programmer gives the 2737 // implementation the freedom to choose any possible mapping of iterations to 2738 // threads in the team. 2739 // 2740 // When schedule(runtime) is specified, the decision regarding scheduling is 2741 // deferred until run time, and the schedule and chunk size are taken from the 2742 // run-sched-var ICV. If the ICV is set to auto, the schedule is 2743 // implementation defined 2744 // 2745 // while(__kmpc_dispatch_next(&LB, &UB)) { 2746 // idx = LB; 2747 // while (idx <= UB) { BODY; ++idx; 2748 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 2749 // } // inner loop 2750 // } 2751 // 2752 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2753 // When schedule(static, chunk_size) is specified, iterations are divided into 2754 // chunks of size chunk_size, and the chunks are assigned to the threads in 2755 // the team in a round-robin fashion in the order of the thread number. 2756 // 2757 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 2758 // while (idx <= UB) { BODY; ++idx; } // inner loop 2759 // LB = LB + ST; 2760 // UB = UB + ST; 2761 // } 2762 // 2763 2764 const Expr *IVExpr = S.getIterationVariable(); 2765 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2766 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2767 2768 if (DynamicOrOrdered) { 2769 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds = 2770 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 2771 llvm::Value *LBVal = DispatchBounds.first; 2772 llvm::Value *UBVal = DispatchBounds.second; 2773 CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal, 2774 LoopArgs.Chunk}; 2775 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize, 2776 IVSigned, Ordered, DipatchRTInputValues); 2777 } else { 2778 CGOpenMPRuntime::StaticRTInput StaticInit( 2779 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 2780 LoopArgs.ST, LoopArgs.Chunk); 2781 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(), 2782 ScheduleKind, StaticInit); 2783 } 2784 2785 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 2786 const unsigned IVSize, 2787 const bool IVSigned) { 2788 if (Ordered) { 2789 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 2790 IVSigned); 2791 } 2792 }; 2793 2794 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 2795 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 2796 OuterLoopArgs.IncExpr = S.getInc(); 2797 OuterLoopArgs.Init = S.getInit(); 2798 OuterLoopArgs.Cond = S.getCond(); 2799 OuterLoopArgs.NextLB = S.getNextLowerBound(); 2800 OuterLoopArgs.NextUB = S.getNextUpperBound(); 2801 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 2802 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 2803 } 2804 2805 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, 2806 const unsigned IVSize, const bool IVSigned) {} 2807 2808 void CodeGenFunction::EmitOMPDistributeOuterLoop( 2809 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 2810 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 2811 const CodeGenLoopTy &CodeGenLoopContent) { 2812 2813 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2814 2815 // Emit outer loop. 2816 // Same behavior as a OMPForOuterLoop, except that schedule cannot be 2817 // dynamic 2818 // 2819 2820 const Expr *IVExpr = S.getIterationVariable(); 2821 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2822 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2823 2824 CGOpenMPRuntime::StaticRTInput StaticInit( 2825 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB, 2826 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk); 2827 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit); 2828 2829 // for combined 'distribute' and 'for' the increment expression of distribute 2830 // is stored in DistInc. For 'distribute' alone, it is in Inc. 2831 Expr *IncExpr; 2832 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) 2833 IncExpr = S.getDistInc(); 2834 else 2835 IncExpr = S.getInc(); 2836 2837 // this routine is shared by 'omp distribute parallel for' and 2838 // 'omp distribute': select the right EUB expression depending on the 2839 // directive 2840 OMPLoopArguments OuterLoopArgs; 2841 OuterLoopArgs.LB = LoopArgs.LB; 2842 OuterLoopArgs.UB = LoopArgs.UB; 2843 OuterLoopArgs.ST = LoopArgs.ST; 2844 OuterLoopArgs.IL = LoopArgs.IL; 2845 OuterLoopArgs.Chunk = LoopArgs.Chunk; 2846 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2847 ? S.getCombinedEnsureUpperBound() 2848 : S.getEnsureUpperBound(); 2849 OuterLoopArgs.IncExpr = IncExpr; 2850 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2851 ? S.getCombinedInit() 2852 : S.getInit(); 2853 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2854 ? S.getCombinedCond() 2855 : S.getCond(); 2856 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2857 ? S.getCombinedNextLowerBound() 2858 : S.getNextLowerBound(); 2859 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2860 ? S.getCombinedNextUpperBound() 2861 : S.getNextUpperBound(); 2862 2863 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S, 2864 LoopScope, OuterLoopArgs, CodeGenLoopContent, 2865 emitEmptyOrdered); 2866 } 2867 2868 static std::pair<LValue, LValue> 2869 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, 2870 const OMPExecutableDirective &S) { 2871 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2872 LValue LB = 2873 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2874 LValue UB = 2875 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2876 2877 // When composing 'distribute' with 'for' (e.g. as in 'distribute 2878 // parallel for') we need to use the 'distribute' 2879 // chunk lower and upper bounds rather than the whole loop iteration 2880 // space. These are parameters to the outlined function for 'parallel' 2881 // and we copy the bounds of the previous schedule into the 2882 // the current ones. 2883 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); 2884 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); 2885 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar( 2886 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc()); 2887 PrevLBVal = CGF.EmitScalarConversion( 2888 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), 2889 LS.getIterationVariable()->getType(), 2890 LS.getPrevLowerBoundVariable()->getExprLoc()); 2891 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar( 2892 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc()); 2893 PrevUBVal = CGF.EmitScalarConversion( 2894 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), 2895 LS.getIterationVariable()->getType(), 2896 LS.getPrevUpperBoundVariable()->getExprLoc()); 2897 2898 CGF.EmitStoreOfScalar(PrevLBVal, LB); 2899 CGF.EmitStoreOfScalar(PrevUBVal, UB); 2900 2901 return {LB, UB}; 2902 } 2903 2904 /// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then 2905 /// we need to use the LB and UB expressions generated by the worksharing 2906 /// code generation support, whereas in non combined situations we would 2907 /// just emit 0 and the LastIteration expression 2908 /// This function is necessary due to the difference of the LB and UB 2909 /// types for the RT emission routines for 'for_static_init' and 2910 /// 'for_dispatch_init' 2911 static std::pair<llvm::Value *, llvm::Value *> 2912 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, 2913 const OMPExecutableDirective &S, 2914 Address LB, Address UB) { 2915 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2916 const Expr *IVExpr = LS.getIterationVariable(); 2917 // when implementing a dynamic schedule for a 'for' combined with a 2918 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop 2919 // is not normalized as each team only executes its own assigned 2920 // distribute chunk 2921 QualType IteratorTy = IVExpr->getType(); 2922 llvm::Value *LBVal = 2923 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2924 llvm::Value *UBVal = 2925 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2926 return {LBVal, UBVal}; 2927 } 2928 2929 static void emitDistributeParallelForDistributeInnerBoundParams( 2930 CodeGenFunction &CGF, const OMPExecutableDirective &S, 2931 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) { 2932 const auto &Dir = cast<OMPLoopDirective>(S); 2933 LValue LB = 2934 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable())); 2935 llvm::Value *LBCast = 2936 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)), 2937 CGF.SizeTy, /*isSigned=*/false); 2938 CapturedVars.push_back(LBCast); 2939 LValue UB = 2940 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable())); 2941 2942 llvm::Value *UBCast = 2943 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)), 2944 CGF.SizeTy, /*isSigned=*/false); 2945 CapturedVars.push_back(UBCast); 2946 } 2947 2948 static void 2949 emitInnerParallelForWhenCombined(CodeGenFunction &CGF, 2950 const OMPLoopDirective &S, 2951 CodeGenFunction::JumpDest LoopExit) { 2952 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, 2953 PrePostActionTy &Action) { 2954 Action.Enter(CGF); 2955 bool HasCancel = false; 2956 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2957 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S)) 2958 HasCancel = D->hasCancel(); 2959 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S)) 2960 HasCancel = D->hasCancel(); 2961 else if (const auto *D = 2962 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2963 HasCancel = D->hasCancel(); 2964 } 2965 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2966 HasCancel); 2967 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2968 emitDistributeParallelForInnerBounds, 2969 emitDistributeParallelForDispatchBounds); 2970 }; 2971 2972 emitCommonOMPParallelDirective( 2973 CGF, S, 2974 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2975 CGInlinedWorksharingLoop, 2976 emitDistributeParallelForDistributeInnerBoundParams); 2977 } 2978 2979 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2980 const OMPDistributeParallelForDirective &S) { 2981 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2982 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2983 S.getDistInc()); 2984 }; 2985 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2986 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2987 } 2988 2989 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2990 const OMPDistributeParallelForSimdDirective &S) { 2991 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2992 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2993 S.getDistInc()); 2994 }; 2995 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2996 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2997 } 2998 2999 void CodeGenFunction::EmitOMPDistributeSimdDirective( 3000 const OMPDistributeSimdDirective &S) { 3001 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3002 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 3003 }; 3004 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3005 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3006 } 3007 3008 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 3009 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 3010 // Emit SPMD target parallel for region as a standalone region. 3011 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3012 emitOMPSimdRegion(CGF, S, Action); 3013 }; 3014 llvm::Function *Fn; 3015 llvm::Constant *Addr; 3016 // Emit target region as a standalone region. 3017 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 3018 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 3019 assert(Fn && Addr && "Target device function emission failed."); 3020 } 3021 3022 void CodeGenFunction::EmitOMPTargetSimdDirective( 3023 const OMPTargetSimdDirective &S) { 3024 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3025 emitOMPSimdRegion(CGF, S, Action); 3026 }; 3027 emitCommonOMPTargetDirective(*this, S, CodeGen); 3028 } 3029 3030 namespace { 3031 struct ScheduleKindModifiersTy { 3032 OpenMPScheduleClauseKind Kind; 3033 OpenMPScheduleClauseModifier M1; 3034 OpenMPScheduleClauseModifier M2; 3035 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 3036 OpenMPScheduleClauseModifier M1, 3037 OpenMPScheduleClauseModifier M2) 3038 : Kind(Kind), M1(M1), M2(M2) {} 3039 }; 3040 } // namespace 3041 3042 bool CodeGenFunction::EmitOMPWorksharingLoop( 3043 const OMPLoopDirective &S, Expr *EUB, 3044 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 3045 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 3046 // Emit the loop iteration variable. 3047 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3048 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3049 EmitVarDecl(*IVDecl); 3050 3051 // Emit the iterations count variable. 3052 // If it is not a variable, Sema decided to calculate iterations count on each 3053 // iteration (e.g., it is foldable into a constant). 3054 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3055 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3056 // Emit calculation of the iterations count. 3057 EmitIgnoredExpr(S.getCalcLastIteration()); 3058 } 3059 3060 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 3061 3062 bool HasLastprivateClause; 3063 // Check pre-condition. 3064 { 3065 OMPLoopScope PreInitScope(*this, S); 3066 // Skip the entire loop if we don't meet the precondition. 3067 // If the condition constant folds and can be elided, avoid emitting the 3068 // whole loop. 3069 bool CondConstant; 3070 llvm::BasicBlock *ContBlock = nullptr; 3071 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3072 if (!CondConstant) 3073 return false; 3074 } else { 3075 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 3076 ContBlock = createBasicBlock("omp.precond.end"); 3077 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3078 getProfileCount(&S)); 3079 EmitBlock(ThenBlock); 3080 incrementProfileCounter(&S); 3081 } 3082 3083 RunCleanupsScope DoacrossCleanupScope(*this); 3084 bool Ordered = false; 3085 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 3086 if (OrderedClause->getNumForLoops()) 3087 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 3088 else 3089 Ordered = true; 3090 } 3091 3092 llvm::DenseSet<const Expr *> EmittedFinals; 3093 emitAlignedClause(*this, S); 3094 bool HasLinears = EmitOMPLinearClauseInit(S); 3095 // Emit helper vars inits. 3096 3097 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 3098 LValue LB = Bounds.first; 3099 LValue UB = Bounds.second; 3100 LValue ST = 3101 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3102 LValue IL = 3103 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3104 3105 // Emit 'then' code. 3106 { 3107 OMPPrivateScope LoopScope(*this); 3108 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 3109 // Emit implicit barrier to synchronize threads and avoid data races on 3110 // initialization of firstprivate variables and post-update of 3111 // lastprivate variables. 3112 CGM.getOpenMPRuntime().emitBarrierCall( 3113 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3114 /*ForceSimpleCall=*/true); 3115 } 3116 EmitOMPPrivateClause(S, LoopScope); 3117 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 3118 *this, S, EmitLValue(S.getIterationVariable())); 3119 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3120 EmitOMPReductionClauseInit(S, LoopScope); 3121 EmitOMPPrivateLoopCounters(S, LoopScope); 3122 EmitOMPLinearClause(S, LoopScope); 3123 (void)LoopScope.Privatize(); 3124 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3125 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3126 3127 // Detect the loop schedule kind and chunk. 3128 const Expr *ChunkExpr = nullptr; 3129 OpenMPScheduleTy ScheduleKind; 3130 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 3131 ScheduleKind.Schedule = C->getScheduleKind(); 3132 ScheduleKind.M1 = C->getFirstScheduleModifier(); 3133 ScheduleKind.M2 = C->getSecondScheduleModifier(); 3134 ChunkExpr = C->getChunkSize(); 3135 } else { 3136 // Default behaviour for schedule clause. 3137 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 3138 *this, S, ScheduleKind.Schedule, ChunkExpr); 3139 } 3140 bool HasChunkSizeOne = false; 3141 llvm::Value *Chunk = nullptr; 3142 if (ChunkExpr) { 3143 Chunk = EmitScalarExpr(ChunkExpr); 3144 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 3145 S.getIterationVariable()->getType(), 3146 S.getBeginLoc()); 3147 Expr::EvalResult Result; 3148 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 3149 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 3150 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 3151 } 3152 } 3153 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3154 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3155 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 3156 // If the static schedule kind is specified or if the ordered clause is 3157 // specified, and if no monotonic modifier is specified, the effect will 3158 // be as if the monotonic modifier was specified. 3159 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 3160 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 3161 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3162 bool IsMonotonic = 3163 Ordered || 3164 ((ScheduleKind.Schedule == OMPC_SCHEDULE_static || 3165 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) && 3166 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic || 3167 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) || 3168 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 3169 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 3170 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 3171 /* Chunked */ Chunk != nullptr) || 3172 StaticChunkedOne) && 3173 !Ordered) { 3174 JumpDest LoopExit = 3175 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3176 emitCommonSimdLoop( 3177 *this, S, 3178 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 3179 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3180 CGF.EmitOMPSimdInit(S, IsMonotonic); 3181 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 3182 if (C->getKind() == OMPC_ORDER_concurrent) 3183 CGF.LoopStack.setParallel(/*Enable=*/true); 3184 } 3185 }, 3186 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 3187 &S, ScheduleKind, LoopExit, 3188 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 3189 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 3190 // When no chunk_size is specified, the iteration space is divided 3191 // into chunks that are approximately equal in size, and at most 3192 // one chunk is distributed to each thread. Note that the size of 3193 // the chunks is unspecified in this case. 3194 CGOpenMPRuntime::StaticRTInput StaticInit( 3195 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 3196 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 3197 StaticChunkedOne ? Chunk : nullptr); 3198 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3199 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 3200 StaticInit); 3201 // UB = min(UB, GlobalUB); 3202 if (!StaticChunkedOne) 3203 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 3204 // IV = LB; 3205 CGF.EmitIgnoredExpr(S.getInit()); 3206 // For unchunked static schedule generate: 3207 // 3208 // while (idx <= UB) { 3209 // BODY; 3210 // ++idx; 3211 // } 3212 // 3213 // For static schedule with chunk one: 3214 // 3215 // while (IV <= PrevUB) { 3216 // BODY; 3217 // IV += ST; 3218 // } 3219 CGF.EmitOMPInnerLoop( 3220 S, LoopScope.requiresCleanups(), 3221 StaticChunkedOne ? S.getCombinedParForInDistCond() 3222 : S.getCond(), 3223 StaticChunkedOne ? S.getDistInc() : S.getInc(), 3224 [&S, LoopExit](CodeGenFunction &CGF) { 3225 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 3226 }, 3227 [](CodeGenFunction &) {}); 3228 }); 3229 EmitBlock(LoopExit.getBlock()); 3230 // Tell the runtime we are done. 3231 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3232 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3233 S.getDirectiveKind()); 3234 }; 3235 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 3236 } else { 3237 // Emit the outer loop, which requests its work chunk [LB..UB] from 3238 // runtime and runs the inner loop to process it. 3239 const OMPLoopArguments LoopArguments( 3240 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3241 IL.getAddress(*this), Chunk, EUB); 3242 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 3243 LoopArguments, CGDispatchBounds); 3244 } 3245 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3246 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 3247 return CGF.Builder.CreateIsNotNull( 3248 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3249 }); 3250 } 3251 EmitOMPReductionClauseFinal( 3252 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 3253 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 3254 : /*Parallel only*/ OMPD_parallel); 3255 // Emit post-update of the reduction variables if IsLastIter != 0. 3256 emitPostUpdateForReductionClause( 3257 *this, S, [IL, &S](CodeGenFunction &CGF) { 3258 return CGF.Builder.CreateIsNotNull( 3259 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3260 }); 3261 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3262 if (HasLastprivateClause) 3263 EmitOMPLastprivateClauseFinal( 3264 S, isOpenMPSimdDirective(S.getDirectiveKind()), 3265 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 3266 } 3267 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 3268 return CGF.Builder.CreateIsNotNull( 3269 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3270 }); 3271 DoacrossCleanupScope.ForceCleanup(); 3272 // We're now done with the loop, so jump to the continuation block. 3273 if (ContBlock) { 3274 EmitBranch(ContBlock); 3275 EmitBlock(ContBlock, /*IsFinished=*/true); 3276 } 3277 } 3278 return HasLastprivateClause; 3279 } 3280 3281 /// The following two functions generate expressions for the loop lower 3282 /// and upper bounds in case of static and dynamic (dispatch) schedule 3283 /// of the associated 'for' or 'distribute' loop. 3284 static std::pair<LValue, LValue> 3285 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3286 const auto &LS = cast<OMPLoopDirective>(S); 3287 LValue LB = 3288 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 3289 LValue UB = 3290 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 3291 return {LB, UB}; 3292 } 3293 3294 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not 3295 /// consider the lower and upper bound expressions generated by the 3296 /// worksharing loop support, but we use 0 and the iteration space size as 3297 /// constants 3298 static std::pair<llvm::Value *, llvm::Value *> 3299 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, 3300 Address LB, Address UB) { 3301 const auto &LS = cast<OMPLoopDirective>(S); 3302 const Expr *IVExpr = LS.getIterationVariable(); 3303 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType()); 3304 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0); 3305 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration()); 3306 return {LBVal, UBVal}; 3307 } 3308 3309 /// Emits internal temp array declarations for the directive with inscan 3310 /// reductions. 3311 /// The code is the following: 3312 /// \code 3313 /// size num_iters = <num_iters>; 3314 /// <type> buffer[num_iters]; 3315 /// \endcode 3316 static void emitScanBasedDirectiveDecls( 3317 CodeGenFunction &CGF, const OMPLoopDirective &S, 3318 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) { 3319 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast( 3320 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false); 3321 SmallVector<const Expr *, 4> Shareds; 3322 SmallVector<const Expr *, 4> Privates; 3323 SmallVector<const Expr *, 4> ReductionOps; 3324 SmallVector<const Expr *, 4> CopyArrayTemps; 3325 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3326 assert(C->getModifier() == OMPC_REDUCTION_inscan && 3327 "Only inscan reductions are expected."); 3328 Shareds.append(C->varlist_begin(), C->varlist_end()); 3329 Privates.append(C->privates().begin(), C->privates().end()); 3330 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 3331 CopyArrayTemps.append(C->copy_array_temps().begin(), 3332 C->copy_array_temps().end()); 3333 } 3334 { 3335 // Emit buffers for each reduction variables. 3336 // ReductionCodeGen is required to emit correctly the code for array 3337 // reductions. 3338 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps); 3339 unsigned Count = 0; 3340 auto *ITA = CopyArrayTemps.begin(); 3341 for (const Expr *IRef : Privates) { 3342 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl()); 3343 // Emit variably modified arrays, used for arrays/array sections 3344 // reductions. 3345 if (PrivateVD->getType()->isVariablyModifiedType()) { 3346 RedCG.emitSharedOrigLValue(CGF, Count); 3347 RedCG.emitAggregateType(CGF, Count); 3348 } 3349 CodeGenFunction::OpaqueValueMapping DimMapping( 3350 CGF, 3351 cast<OpaqueValueExpr>( 3352 cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe()) 3353 ->getSizeExpr()), 3354 RValue::get(OMPScanNumIterations)); 3355 // Emit temp buffer. 3356 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl())); 3357 ++ITA; 3358 ++Count; 3359 } 3360 } 3361 } 3362 3363 /// Emits the code for the directive with inscan reductions. 3364 /// The code is the following: 3365 /// \code 3366 /// #pragma omp ... 3367 /// for (i: 0..<num_iters>) { 3368 /// <input phase>; 3369 /// buffer[i] = red; 3370 /// } 3371 /// #pragma omp master // in parallel region 3372 /// for (int k = 0; k != ceil(log2(num_iters)); ++k) 3373 /// for (size cnt = last_iter; cnt >= pow(2, k); --k) 3374 /// buffer[i] op= buffer[i-pow(2,k)]; 3375 /// #pragma omp barrier // in parallel region 3376 /// #pragma omp ... 3377 /// for (0..<num_iters>) { 3378 /// red = InclusiveScan ? buffer[i] : buffer[i-1]; 3379 /// <scan phase>; 3380 /// } 3381 /// \endcode 3382 static void emitScanBasedDirective( 3383 CodeGenFunction &CGF, const OMPLoopDirective &S, 3384 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen, 3385 llvm::function_ref<void(CodeGenFunction &)> FirstGen, 3386 llvm::function_ref<void(CodeGenFunction &)> SecondGen) { 3387 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast( 3388 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false); 3389 SmallVector<const Expr *, 4> Privates; 3390 SmallVector<const Expr *, 4> ReductionOps; 3391 SmallVector<const Expr *, 4> LHSs; 3392 SmallVector<const Expr *, 4> RHSs; 3393 SmallVector<const Expr *, 4> CopyArrayElems; 3394 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3395 assert(C->getModifier() == OMPC_REDUCTION_inscan && 3396 "Only inscan reductions are expected."); 3397 Privates.append(C->privates().begin(), C->privates().end()); 3398 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 3399 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 3400 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 3401 CopyArrayElems.append(C->copy_array_elems().begin(), 3402 C->copy_array_elems().end()); 3403 } 3404 CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S); 3405 { 3406 // Emit loop with input phase: 3407 // #pragma omp ... 3408 // for (i: 0..<num_iters>) { 3409 // <input phase>; 3410 // buffer[i] = red; 3411 // } 3412 CGF.OMPFirstScanLoop = true; 3413 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3414 FirstGen(CGF); 3415 } 3416 // #pragma omp barrier // in parallel region 3417 auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems, 3418 &ReductionOps, 3419 &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) { 3420 Action.Enter(CGF); 3421 // Emit prefix reduction: 3422 // #pragma omp master // in parallel region 3423 // for (int k = 0; k <= ceil(log2(n)); ++k) 3424 llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock(); 3425 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body"); 3426 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit"); 3427 llvm::Function *F = 3428 CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy); 3429 llvm::Value *Arg = 3430 CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy); 3431 llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg); 3432 F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy); 3433 LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal); 3434 LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy); 3435 llvm::Value *NMin1 = CGF.Builder.CreateNUWSub( 3436 OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3437 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc()); 3438 CGF.EmitBlock(LoopBB); 3439 auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2); 3440 // size pow2k = 1; 3441 auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3442 Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB); 3443 Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB); 3444 // for (size i = n - 1; i >= 2 ^ k; --i) 3445 // tmp[i] op= tmp[i-pow2k]; 3446 llvm::BasicBlock *InnerLoopBB = 3447 CGF.createBasicBlock("omp.inner.log.scan.body"); 3448 llvm::BasicBlock *InnerExitBB = 3449 CGF.createBasicBlock("omp.inner.log.scan.exit"); 3450 llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K); 3451 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3452 CGF.EmitBlock(InnerLoopBB); 3453 auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3454 IVal->addIncoming(NMin1, LoopBB); 3455 { 3456 CodeGenFunction::OMPPrivateScope PrivScope(CGF); 3457 auto *ILHS = LHSs.begin(); 3458 auto *IRHS = RHSs.begin(); 3459 for (const Expr *CopyArrayElem : CopyArrayElems) { 3460 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 3461 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 3462 Address LHSAddr = Address::invalid(); 3463 { 3464 CodeGenFunction::OpaqueValueMapping IdxMapping( 3465 CGF, 3466 cast<OpaqueValueExpr>( 3467 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3468 RValue::get(IVal)); 3469 LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3470 } 3471 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3472 Address RHSAddr = Address::invalid(); 3473 { 3474 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3475 CodeGenFunction::OpaqueValueMapping IdxMapping( 3476 CGF, 3477 cast<OpaqueValueExpr>( 3478 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3479 RValue::get(OffsetIVal)); 3480 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3481 } 3482 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3483 ++ILHS; 3484 ++IRHS; 3485 } 3486 PrivScope.Privatize(); 3487 CGF.CGM.getOpenMPRuntime().emitReduction( 3488 CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 3489 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3490 } 3491 llvm::Value *NextIVal = 3492 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3493 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3494 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3495 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3496 CGF.EmitBlock(InnerExitBB); 3497 llvm::Value *Next = 3498 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3499 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3500 // pow2k <<= 1; 3501 llvm::Value *NextPow2K = 3502 CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3503 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3504 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3505 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3506 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3507 CGF.EmitBlock(ExitBB); 3508 }; 3509 if (isOpenMPParallelDirective(S.getDirectiveKind())) { 3510 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3511 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3512 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3513 /*ForceSimpleCall=*/true); 3514 } else { 3515 RegionCodeGenTy RCG(CodeGen); 3516 RCG(CGF); 3517 } 3518 3519 CGF.OMPFirstScanLoop = false; 3520 SecondGen(CGF); 3521 } 3522 3523 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3524 const OMPLoopDirective &S, 3525 bool HasCancel) { 3526 bool HasLastprivates; 3527 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3528 [](const OMPReductionClause *C) { 3529 return C->getModifier() == OMPC_REDUCTION_inscan; 3530 })) { 3531 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3532 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3533 OMPLoopScope LoopScope(CGF, S); 3534 return CGF.EmitScalarExpr(S.getNumIterations()); 3535 }; 3536 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3537 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3538 CGF, S.getDirectiveKind(), HasCancel); 3539 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3540 emitForLoopBounds, 3541 emitDispatchForLoopBounds); 3542 // Emit an implicit barrier at the end. 3543 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3544 OMPD_for); 3545 }; 3546 const auto &&SecondGen = [&S, HasCancel, 3547 &HasLastprivates](CodeGenFunction &CGF) { 3548 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3549 CGF, S.getDirectiveKind(), HasCancel); 3550 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3551 emitForLoopBounds, 3552 emitDispatchForLoopBounds); 3553 }; 3554 if (!isOpenMPParallelDirective(S.getDirectiveKind())) 3555 emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen); 3556 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3557 } else { 3558 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3559 HasCancel); 3560 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3561 emitForLoopBounds, 3562 emitDispatchForLoopBounds); 3563 } 3564 return HasLastprivates; 3565 } 3566 3567 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) { 3568 if (S.hasCancel()) 3569 return false; 3570 for (OMPClause *C : S.clauses()) 3571 if (!isa<OMPNowaitClause>(C)) 3572 return false; 3573 3574 return true; 3575 } 3576 3577 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3578 bool HasLastprivates = false; 3579 bool UseOMPIRBuilder = 3580 CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S); 3581 auto &&CodeGen = [this, &S, &HasLastprivates, 3582 UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) { 3583 // Use the OpenMPIRBuilder if enabled. 3584 if (UseOMPIRBuilder) { 3585 // Emit the associated statement and get its loop representation. 3586 const Stmt *Inner = S.getRawStmt(); 3587 llvm::CanonicalLoopInfo *CLI = 3588 EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 3589 3590 bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>(); 3591 llvm::OpenMPIRBuilder &OMPBuilder = 3592 CGM.getOpenMPRuntime().getOMPBuilder(); 3593 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3594 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3595 OMPBuilder.createWorkshareLoop(Builder, CLI, AllocaIP, NeedsBarrier); 3596 return; 3597 } 3598 3599 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3600 }; 3601 { 3602 auto LPCRegion = 3603 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3604 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3605 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3606 S.hasCancel()); 3607 } 3608 3609 if (!UseOMPIRBuilder) { 3610 // Emit an implicit barrier at the end. 3611 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3612 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3613 } 3614 // Check for outer lastprivate conditional update. 3615 checkForLastprivateConditionalUpdate(*this, S); 3616 } 3617 3618 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3619 bool HasLastprivates = false; 3620 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3621 PrePostActionTy &) { 3622 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3623 }; 3624 { 3625 auto LPCRegion = 3626 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3627 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3628 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3629 } 3630 3631 // Emit an implicit barrier at the end. 3632 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3633 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3634 // Check for outer lastprivate conditional update. 3635 checkForLastprivateConditionalUpdate(*this, S); 3636 } 3637 3638 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3639 const Twine &Name, 3640 llvm::Value *Init = nullptr) { 3641 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3642 if (Init) 3643 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3644 return LVal; 3645 } 3646 3647 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3648 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3649 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3650 bool HasLastprivates = false; 3651 auto &&CodeGen = [&S, CapturedStmt, CS, 3652 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3653 const ASTContext &C = CGF.getContext(); 3654 QualType KmpInt32Ty = 3655 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3656 // Emit helper vars inits. 3657 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3658 CGF.Builder.getInt32(0)); 3659 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3660 ? CGF.Builder.getInt32(CS->size() - 1) 3661 : CGF.Builder.getInt32(0); 3662 LValue UB = 3663 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3664 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3665 CGF.Builder.getInt32(1)); 3666 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3667 CGF.Builder.getInt32(0)); 3668 // Loop counter. 3669 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3670 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3671 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3672 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3673 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3674 // Generate condition for loop. 3675 BinaryOperator *Cond = BinaryOperator::Create( 3676 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary, 3677 S.getBeginLoc(), FPOptionsOverride()); 3678 // Increment for loop counter. 3679 UnaryOperator *Inc = UnaryOperator::Create( 3680 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary, 3681 S.getBeginLoc(), true, FPOptionsOverride()); 3682 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3683 // Iterate through all sections and emit a switch construct: 3684 // switch (IV) { 3685 // case 0: 3686 // <SectionStmt[0]>; 3687 // break; 3688 // ... 3689 // case <NumSection> - 1: 3690 // <SectionStmt[<NumSection> - 1]>; 3691 // break; 3692 // } 3693 // .omp.sections.exit: 3694 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3695 llvm::SwitchInst *SwitchStmt = 3696 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3697 ExitBB, CS == nullptr ? 1 : CS->size()); 3698 if (CS) { 3699 unsigned CaseNumber = 0; 3700 for (const Stmt *SubStmt : CS->children()) { 3701 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3702 CGF.EmitBlock(CaseBB); 3703 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3704 CGF.EmitStmt(SubStmt); 3705 CGF.EmitBranch(ExitBB); 3706 ++CaseNumber; 3707 } 3708 } else { 3709 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3710 CGF.EmitBlock(CaseBB); 3711 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3712 CGF.EmitStmt(CapturedStmt); 3713 CGF.EmitBranch(ExitBB); 3714 } 3715 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3716 }; 3717 3718 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3719 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3720 // Emit implicit barrier to synchronize threads and avoid data races on 3721 // initialization of firstprivate variables and post-update of lastprivate 3722 // variables. 3723 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3724 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3725 /*ForceSimpleCall=*/true); 3726 } 3727 CGF.EmitOMPPrivateClause(S, LoopScope); 3728 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3729 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3730 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3731 (void)LoopScope.Privatize(); 3732 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3733 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3734 3735 // Emit static non-chunked loop. 3736 OpenMPScheduleTy ScheduleKind; 3737 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3738 CGOpenMPRuntime::StaticRTInput StaticInit( 3739 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3740 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3741 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3742 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3743 // UB = min(UB, GlobalUB); 3744 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3745 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3746 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3747 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3748 // IV = LB; 3749 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3750 // while (idx <= UB) { BODY; ++idx; } 3751 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3752 [](CodeGenFunction &) {}); 3753 // Tell the runtime we are done. 3754 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3755 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3756 S.getDirectiveKind()); 3757 }; 3758 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3759 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3760 // Emit post-update of the reduction variables if IsLastIter != 0. 3761 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3762 return CGF.Builder.CreateIsNotNull( 3763 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3764 }); 3765 3766 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3767 if (HasLastprivates) 3768 CGF.EmitOMPLastprivateClauseFinal( 3769 S, /*NoFinals=*/false, 3770 CGF.Builder.CreateIsNotNull( 3771 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3772 }; 3773 3774 bool HasCancel = false; 3775 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3776 HasCancel = OSD->hasCancel(); 3777 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3778 HasCancel = OPSD->hasCancel(); 3779 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3780 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3781 HasCancel); 3782 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3783 // clause. Otherwise the barrier will be generated by the codegen for the 3784 // directive. 3785 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3786 // Emit implicit barrier to synchronize threads and avoid data races on 3787 // initialization of firstprivate variables. 3788 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3789 OMPD_unknown); 3790 } 3791 } 3792 3793 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3794 if (CGM.getLangOpts().OpenMPIRBuilder) { 3795 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3796 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3797 using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy; 3798 3799 auto FiniCB = [this](InsertPointTy IP) { 3800 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3801 }; 3802 3803 const CapturedStmt *ICS = S.getInnermostCapturedStmt(); 3804 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3805 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3806 llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector; 3807 if (CS) { 3808 for (const Stmt *SubStmt : CS->children()) { 3809 auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP, 3810 InsertPointTy CodeGenIP, 3811 llvm::BasicBlock &FiniBB) { 3812 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, 3813 FiniBB); 3814 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SubStmt, CodeGenIP, 3815 FiniBB); 3816 }; 3817 SectionCBVector.push_back(SectionCB); 3818 } 3819 } else { 3820 auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP, 3821 InsertPointTy CodeGenIP, 3822 llvm::BasicBlock &FiniBB) { 3823 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3824 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CapturedStmt, CodeGenIP, 3825 FiniBB); 3826 }; 3827 SectionCBVector.push_back(SectionCB); 3828 } 3829 3830 // Privatization callback that performs appropriate action for 3831 // shared/private/firstprivate/lastprivate/copyin/... variables. 3832 // 3833 // TODO: This defaults to shared right now. 3834 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 3835 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) { 3836 // The next line is appropriate only for variables (Val) with the 3837 // data-sharing attribute "shared". 3838 ReplVal = &Val; 3839 3840 return CodeGenIP; 3841 }; 3842 3843 CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP); 3844 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3845 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3846 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3847 Builder.restoreIP(OMPBuilder.createSections( 3848 Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(), 3849 S.getSingleClause<OMPNowaitClause>())); 3850 return; 3851 } 3852 { 3853 auto LPCRegion = 3854 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3855 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3856 EmitSections(S); 3857 } 3858 // Emit an implicit barrier at the end. 3859 if (!S.getSingleClause<OMPNowaitClause>()) { 3860 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3861 OMPD_sections); 3862 } 3863 // Check for outer lastprivate conditional update. 3864 checkForLastprivateConditionalUpdate(*this, S); 3865 } 3866 3867 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3868 if (CGM.getLangOpts().OpenMPIRBuilder) { 3869 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3870 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3871 3872 const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt(); 3873 auto FiniCB = [this](InsertPointTy IP) { 3874 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3875 }; 3876 3877 auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP, 3878 InsertPointTy CodeGenIP, 3879 llvm::BasicBlock &FiniBB) { 3880 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3881 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SectionRegionBodyStmt, 3882 CodeGenIP, FiniBB); 3883 }; 3884 3885 LexicalScope Scope(*this, S.getSourceRange()); 3886 EmitStopPoint(&S); 3887 Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB)); 3888 3889 return; 3890 } 3891 LexicalScope Scope(*this, S.getSourceRange()); 3892 EmitStopPoint(&S); 3893 EmitStmt(S.getAssociatedStmt()); 3894 } 3895 3896 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3897 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3898 llvm::SmallVector<const Expr *, 8> DestExprs; 3899 llvm::SmallVector<const Expr *, 8> SrcExprs; 3900 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3901 // Check if there are any 'copyprivate' clauses associated with this 3902 // 'single' construct. 3903 // Build a list of copyprivate variables along with helper expressions 3904 // (<source>, <destination>, <destination>=<source> expressions) 3905 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3906 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3907 DestExprs.append(C->destination_exprs().begin(), 3908 C->destination_exprs().end()); 3909 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3910 AssignmentOps.append(C->assignment_ops().begin(), 3911 C->assignment_ops().end()); 3912 } 3913 // Emit code for 'single' region along with 'copyprivate' clauses 3914 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3915 Action.Enter(CGF); 3916 OMPPrivateScope SingleScope(CGF); 3917 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3918 CGF.EmitOMPPrivateClause(S, SingleScope); 3919 (void)SingleScope.Privatize(); 3920 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3921 }; 3922 { 3923 auto LPCRegion = 3924 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3925 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3926 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3927 CopyprivateVars, DestExprs, 3928 SrcExprs, AssignmentOps); 3929 } 3930 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3931 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3932 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3933 CGM.getOpenMPRuntime().emitBarrierCall( 3934 *this, S.getBeginLoc(), 3935 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3936 } 3937 // Check for outer lastprivate conditional update. 3938 checkForLastprivateConditionalUpdate(*this, S); 3939 } 3940 3941 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3942 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3943 Action.Enter(CGF); 3944 CGF.EmitStmt(S.getRawStmt()); 3945 }; 3946 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3947 } 3948 3949 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3950 if (CGM.getLangOpts().OpenMPIRBuilder) { 3951 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3952 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3953 3954 const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt(); 3955 3956 auto FiniCB = [this](InsertPointTy IP) { 3957 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3958 }; 3959 3960 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3961 InsertPointTy CodeGenIP, 3962 llvm::BasicBlock &FiniBB) { 3963 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3964 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3965 CodeGenIP, FiniBB); 3966 }; 3967 3968 LexicalScope Scope(*this, S.getSourceRange()); 3969 EmitStopPoint(&S); 3970 Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB)); 3971 3972 return; 3973 } 3974 LexicalScope Scope(*this, S.getSourceRange()); 3975 EmitStopPoint(&S); 3976 emitMaster(*this, S); 3977 } 3978 3979 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3980 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3981 Action.Enter(CGF); 3982 CGF.EmitStmt(S.getRawStmt()); 3983 }; 3984 Expr *Filter = nullptr; 3985 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3986 Filter = FilterClause->getThreadID(); 3987 CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(), 3988 Filter); 3989 } 3990 3991 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) { 3992 if (CGM.getLangOpts().OpenMPIRBuilder) { 3993 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3994 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3995 3996 const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt(); 3997 const Expr *Filter = nullptr; 3998 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3999 Filter = FilterClause->getThreadID(); 4000 llvm::Value *FilterVal = Filter 4001 ? EmitScalarExpr(Filter, CGM.Int32Ty) 4002 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0); 4003 4004 auto FiniCB = [this](InsertPointTy IP) { 4005 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4006 }; 4007 4008 auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP, 4009 InsertPointTy CodeGenIP, 4010 llvm::BasicBlock &FiniBB) { 4011 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4012 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt, 4013 CodeGenIP, FiniBB); 4014 }; 4015 4016 LexicalScope Scope(*this, S.getSourceRange()); 4017 EmitStopPoint(&S); 4018 Builder.restoreIP( 4019 OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal)); 4020 4021 return; 4022 } 4023 LexicalScope Scope(*this, S.getSourceRange()); 4024 EmitStopPoint(&S); 4025 emitMasked(*this, S); 4026 } 4027 4028 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 4029 if (CGM.getLangOpts().OpenMPIRBuilder) { 4030 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 4031 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 4032 4033 const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt(); 4034 const Expr *Hint = nullptr; 4035 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 4036 Hint = HintClause->getHint(); 4037 4038 // TODO: This is slightly different from what's currently being done in 4039 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 4040 // about typing is final. 4041 llvm::Value *HintInst = nullptr; 4042 if (Hint) 4043 HintInst = 4044 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 4045 4046 auto FiniCB = [this](InsertPointTy IP) { 4047 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4048 }; 4049 4050 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 4051 InsertPointTy CodeGenIP, 4052 llvm::BasicBlock &FiniBB) { 4053 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4054 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 4055 CodeGenIP, FiniBB); 4056 }; 4057 4058 LexicalScope Scope(*this, S.getSourceRange()); 4059 EmitStopPoint(&S); 4060 Builder.restoreIP(OMPBuilder.createCritical( 4061 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 4062 HintInst)); 4063 4064 return; 4065 } 4066 4067 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4068 Action.Enter(CGF); 4069 CGF.EmitStmt(S.getAssociatedStmt()); 4070 }; 4071 const Expr *Hint = nullptr; 4072 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 4073 Hint = HintClause->getHint(); 4074 LexicalScope Scope(*this, S.getSourceRange()); 4075 EmitStopPoint(&S); 4076 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 4077 S.getDirectiveName().getAsString(), 4078 CodeGen, S.getBeginLoc(), Hint); 4079 } 4080 4081 void CodeGenFunction::EmitOMPParallelForDirective( 4082 const OMPParallelForDirective &S) { 4083 // Emit directive as a combined directive that consists of two implicit 4084 // directives: 'parallel' with 'for' directive. 4085 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4086 Action.Enter(CGF); 4087 (void)emitWorksharingDirective(CGF, S, S.hasCancel()); 4088 }; 4089 { 4090 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 4091 [](const OMPReductionClause *C) { 4092 return C->getModifier() == OMPC_REDUCTION_inscan; 4093 })) { 4094 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 4095 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 4096 CGCapturedStmtInfo CGSI(CR_OpenMP); 4097 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI); 4098 OMPLoopScope LoopScope(CGF, S); 4099 return CGF.EmitScalarExpr(S.getNumIterations()); 4100 }; 4101 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen); 4102 } 4103 auto LPCRegion = 4104 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4105 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 4106 emitEmptyBoundParameters); 4107 } 4108 // Check for outer lastprivate conditional update. 4109 checkForLastprivateConditionalUpdate(*this, S); 4110 } 4111 4112 void CodeGenFunction::EmitOMPParallelForSimdDirective( 4113 const OMPParallelForSimdDirective &S) { 4114 // Emit directive as a combined directive that consists of two implicit 4115 // directives: 'parallel' with 'for' directive. 4116 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4117 Action.Enter(CGF); 4118 (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 4119 }; 4120 { 4121 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 4122 [](const OMPReductionClause *C) { 4123 return C->getModifier() == OMPC_REDUCTION_inscan; 4124 })) { 4125 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 4126 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 4127 CGCapturedStmtInfo CGSI(CR_OpenMP); 4128 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI); 4129 OMPLoopScope LoopScope(CGF, S); 4130 return CGF.EmitScalarExpr(S.getNumIterations()); 4131 }; 4132 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen); 4133 } 4134 auto LPCRegion = 4135 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4136 emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen, 4137 emitEmptyBoundParameters); 4138 } 4139 // Check for outer lastprivate conditional update. 4140 checkForLastprivateConditionalUpdate(*this, S); 4141 } 4142 4143 void CodeGenFunction::EmitOMPParallelMasterDirective( 4144 const OMPParallelMasterDirective &S) { 4145 // Emit directive as a combined directive that consists of two implicit 4146 // directives: 'parallel' with 'master' directive. 4147 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4148 Action.Enter(CGF); 4149 OMPPrivateScope PrivateScope(CGF); 4150 bool Copyins = CGF.EmitOMPCopyinClause(S); 4151 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4152 if (Copyins) { 4153 // Emit implicit barrier to synchronize threads and avoid data races on 4154 // propagation master's thread values of threadprivate variables to local 4155 // instances of that variables of all other implicit threads. 4156 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 4157 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 4158 /*ForceSimpleCall=*/true); 4159 } 4160 CGF.EmitOMPPrivateClause(S, PrivateScope); 4161 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4162 (void)PrivateScope.Privatize(); 4163 emitMaster(CGF, S); 4164 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 4165 }; 4166 { 4167 auto LPCRegion = 4168 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4169 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen, 4170 emitEmptyBoundParameters); 4171 emitPostUpdateForReductionClause(*this, S, 4172 [](CodeGenFunction &) { return nullptr; }); 4173 } 4174 // Check for outer lastprivate conditional update. 4175 checkForLastprivateConditionalUpdate(*this, S); 4176 } 4177 4178 void CodeGenFunction::EmitOMPParallelSectionsDirective( 4179 const OMPParallelSectionsDirective &S) { 4180 // Emit directive as a combined directive that consists of two implicit 4181 // directives: 'parallel' with 'sections' directive. 4182 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4183 Action.Enter(CGF); 4184 CGF.EmitSections(S); 4185 }; 4186 { 4187 auto LPCRegion = 4188 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4189 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen, 4190 emitEmptyBoundParameters); 4191 } 4192 // Check for outer lastprivate conditional update. 4193 checkForLastprivateConditionalUpdate(*this, S); 4194 } 4195 4196 namespace { 4197 /// Get the list of variables declared in the context of the untied tasks. 4198 class CheckVarsEscapingUntiedTaskDeclContext final 4199 : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> { 4200 llvm::SmallVector<const VarDecl *, 4> PrivateDecls; 4201 4202 public: 4203 explicit CheckVarsEscapingUntiedTaskDeclContext() = default; 4204 virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default; 4205 void VisitDeclStmt(const DeclStmt *S) { 4206 if (!S) 4207 return; 4208 // Need to privatize only local vars, static locals can be processed as is. 4209 for (const Decl *D : S->decls()) { 4210 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) 4211 if (VD->hasLocalStorage()) 4212 PrivateDecls.push_back(VD); 4213 } 4214 } 4215 void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; } 4216 void VisitCapturedStmt(const CapturedStmt *) { return; } 4217 void VisitLambdaExpr(const LambdaExpr *) { return; } 4218 void VisitBlockExpr(const BlockExpr *) { return; } 4219 void VisitStmt(const Stmt *S) { 4220 if (!S) 4221 return; 4222 for (const Stmt *Child : S->children()) 4223 if (Child) 4224 Visit(Child); 4225 } 4226 4227 /// Swaps list of vars with the provided one. 4228 ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; } 4229 }; 4230 } // anonymous namespace 4231 4232 void CodeGenFunction::EmitOMPTaskBasedDirective( 4233 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, 4234 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, 4235 OMPTaskDataTy &Data) { 4236 // Emit outlined function for task construct. 4237 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion); 4238 auto I = CS->getCapturedDecl()->param_begin(); 4239 auto PartId = std::next(I); 4240 auto TaskT = std::next(I, 4); 4241 // Check if the task is final 4242 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 4243 // If the condition constant folds and can be elided, try to avoid emitting 4244 // the condition and the dead arm of the if/else. 4245 const Expr *Cond = Clause->getCondition(); 4246 bool CondConstant; 4247 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 4248 Data.Final.setInt(CondConstant); 4249 else 4250 Data.Final.setPointer(EvaluateExprAsBool(Cond)); 4251 } else { 4252 // By default the task is not final. 4253 Data.Final.setInt(/*IntVal=*/false); 4254 } 4255 // Check if the task has 'priority' clause. 4256 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 4257 const Expr *Prio = Clause->getPriority(); 4258 Data.Priority.setInt(/*IntVal=*/true); 4259 Data.Priority.setPointer(EmitScalarConversion( 4260 EmitScalarExpr(Prio), Prio->getType(), 4261 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 4262 Prio->getExprLoc())); 4263 } 4264 // The first function argument for tasks is a thread id, the second one is a 4265 // part id (0 for tied tasks, >=0 for untied task). 4266 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 4267 // Get list of private variables. 4268 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 4269 auto IRef = C->varlist_begin(); 4270 for (const Expr *IInit : C->private_copies()) { 4271 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4272 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4273 Data.PrivateVars.push_back(*IRef); 4274 Data.PrivateCopies.push_back(IInit); 4275 } 4276 ++IRef; 4277 } 4278 } 4279 EmittedAsPrivate.clear(); 4280 // Get list of firstprivate variables. 4281 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4282 auto IRef = C->varlist_begin(); 4283 auto IElemInitRef = C->inits().begin(); 4284 for (const Expr *IInit : C->private_copies()) { 4285 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4286 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4287 Data.FirstprivateVars.push_back(*IRef); 4288 Data.FirstprivateCopies.push_back(IInit); 4289 Data.FirstprivateInits.push_back(*IElemInitRef); 4290 } 4291 ++IRef; 4292 ++IElemInitRef; 4293 } 4294 } 4295 // Get list of lastprivate variables (for taskloops). 4296 llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 4297 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 4298 auto IRef = C->varlist_begin(); 4299 auto ID = C->destination_exprs().begin(); 4300 for (const Expr *IInit : C->private_copies()) { 4301 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4302 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4303 Data.LastprivateVars.push_back(*IRef); 4304 Data.LastprivateCopies.push_back(IInit); 4305 } 4306 LastprivateDstsOrigs.insert( 4307 std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 4308 cast<DeclRefExpr>(*IRef))); 4309 ++IRef; 4310 ++ID; 4311 } 4312 } 4313 SmallVector<const Expr *, 4> LHSs; 4314 SmallVector<const Expr *, 4> RHSs; 4315 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 4316 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4317 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4318 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4319 Data.ReductionOps.append(C->reduction_ops().begin(), 4320 C->reduction_ops().end()); 4321 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4322 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4323 } 4324 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 4325 *this, S.getBeginLoc(), LHSs, RHSs, Data); 4326 // Build list of dependences. 4327 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4328 OMPTaskDataTy::DependData &DD = 4329 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4330 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4331 } 4332 // Get list of local vars for untied tasks. 4333 if (!Data.Tied) { 4334 CheckVarsEscapingUntiedTaskDeclContext Checker; 4335 Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt()); 4336 Data.PrivateLocals.append(Checker.getPrivateDecls().begin(), 4337 Checker.getPrivateDecls().end()); 4338 } 4339 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 4340 CapturedRegion](CodeGenFunction &CGF, 4341 PrePostActionTy &Action) { 4342 llvm::MapVector<CanonicalDeclPtr<const VarDecl>, 4343 std::pair<Address, Address>> 4344 UntiedLocalVars; 4345 // Set proper addresses for generated private copies. 4346 OMPPrivateScope Scope(CGF); 4347 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 4348 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 4349 !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) { 4350 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4351 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4352 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4353 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4354 CS->getCapturedDecl()->getParam(PrivatesParam))); 4355 // Map privates. 4356 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4357 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4358 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4359 CallArgs.push_back(PrivatesPtr); 4360 ParamTypes.push_back(PrivatesPtr->getType()); 4361 for (const Expr *E : Data.PrivateVars) { 4362 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4363 Address PrivatePtr = CGF.CreateMemTemp( 4364 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 4365 PrivatePtrs.emplace_back(VD, PrivatePtr); 4366 CallArgs.push_back(PrivatePtr.getPointer()); 4367 ParamTypes.push_back(PrivatePtr.getType()); 4368 } 4369 for (const Expr *E : Data.FirstprivateVars) { 4370 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4371 Address PrivatePtr = 4372 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4373 ".firstpriv.ptr.addr"); 4374 PrivatePtrs.emplace_back(VD, PrivatePtr); 4375 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 4376 CallArgs.push_back(PrivatePtr.getPointer()); 4377 ParamTypes.push_back(PrivatePtr.getType()); 4378 } 4379 for (const Expr *E : Data.LastprivateVars) { 4380 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4381 Address PrivatePtr = 4382 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4383 ".lastpriv.ptr.addr"); 4384 PrivatePtrs.emplace_back(VD, PrivatePtr); 4385 CallArgs.push_back(PrivatePtr.getPointer()); 4386 ParamTypes.push_back(PrivatePtr.getType()); 4387 } 4388 for (const VarDecl *VD : Data.PrivateLocals) { 4389 QualType Ty = VD->getType().getNonReferenceType(); 4390 if (VD->getType()->isLValueReferenceType()) 4391 Ty = CGF.getContext().getPointerType(Ty); 4392 if (isAllocatableDecl(VD)) 4393 Ty = CGF.getContext().getPointerType(Ty); 4394 Address PrivatePtr = CGF.CreateMemTemp( 4395 CGF.getContext().getPointerType(Ty), ".local.ptr.addr"); 4396 auto Result = UntiedLocalVars.insert( 4397 std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid()))); 4398 // If key exists update in place. 4399 if (Result.second == false) 4400 *Result.first = std::make_pair( 4401 VD, std::make_pair(PrivatePtr, Address::invalid())); 4402 CallArgs.push_back(PrivatePtr.getPointer()); 4403 ParamTypes.push_back(PrivatePtr.getType()); 4404 } 4405 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4406 ParamTypes, /*isVarArg=*/false); 4407 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4408 CopyFn, CopyFnTy->getPointerTo()); 4409 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4410 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4411 for (const auto &Pair : LastprivateDstsOrigs) { 4412 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 4413 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 4414 /*RefersToEnclosingVariableOrCapture=*/ 4415 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 4416 Pair.second->getType(), VK_LValue, 4417 Pair.second->getExprLoc()); 4418 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 4419 return CGF.EmitLValue(&DRE).getAddress(CGF); 4420 }); 4421 } 4422 for (const auto &Pair : PrivatePtrs) { 4423 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4424 CGF.getContext().getDeclAlign(Pair.first)); 4425 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4426 } 4427 // Adjust mapping for internal locals by mapping actual memory instead of 4428 // a pointer to this memory. 4429 for (auto &Pair : UntiedLocalVars) { 4430 if (isAllocatableDecl(Pair.first)) { 4431 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first); 4432 Address Replacement(Ptr, CGF.getPointerAlign()); 4433 Pair.second.first = Replacement; 4434 Ptr = CGF.Builder.CreateLoad(Replacement); 4435 Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first)); 4436 Pair.second.second = Replacement; 4437 } else { 4438 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first); 4439 Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first)); 4440 Pair.second.first = Replacement; 4441 } 4442 } 4443 } 4444 if (Data.Reductions) { 4445 OMPPrivateScope FirstprivateScope(CGF); 4446 for (const auto &Pair : FirstprivatePtrs) { 4447 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4448 CGF.getContext().getDeclAlign(Pair.first)); 4449 FirstprivateScope.addPrivate(Pair.first, 4450 [Replacement]() { return Replacement; }); 4451 } 4452 (void)FirstprivateScope.Privatize(); 4453 OMPLexicalScope LexScope(CGF, S, CapturedRegion); 4454 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars, 4455 Data.ReductionCopies, Data.ReductionOps); 4456 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad( 4457 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9))); 4458 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) { 4459 RedCG.emitSharedOrigLValue(CGF, Cnt); 4460 RedCG.emitAggregateType(CGF, Cnt); 4461 // FIXME: This must removed once the runtime library is fixed. 4462 // Emit required threadprivate variables for 4463 // initializer/combiner/finalizer. 4464 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 4465 RedCG, Cnt); 4466 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 4467 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 4468 Replacement = 4469 Address(CGF.EmitScalarConversion( 4470 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 4471 CGF.getContext().getPointerType( 4472 Data.ReductionCopies[Cnt]->getType()), 4473 Data.ReductionCopies[Cnt]->getExprLoc()), 4474 Replacement.getAlignment()); 4475 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 4476 Scope.addPrivate(RedCG.getBaseDecl(Cnt), 4477 [Replacement]() { return Replacement; }); 4478 } 4479 } 4480 // Privatize all private variables except for in_reduction items. 4481 (void)Scope.Privatize(); 4482 SmallVector<const Expr *, 4> InRedVars; 4483 SmallVector<const Expr *, 4> InRedPrivs; 4484 SmallVector<const Expr *, 4> InRedOps; 4485 SmallVector<const Expr *, 4> TaskgroupDescriptors; 4486 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) { 4487 auto IPriv = C->privates().begin(); 4488 auto IRed = C->reduction_ops().begin(); 4489 auto ITD = C->taskgroup_descriptors().begin(); 4490 for (const Expr *Ref : C->varlists()) { 4491 InRedVars.emplace_back(Ref); 4492 InRedPrivs.emplace_back(*IPriv); 4493 InRedOps.emplace_back(*IRed); 4494 TaskgroupDescriptors.emplace_back(*ITD); 4495 std::advance(IPriv, 1); 4496 std::advance(IRed, 1); 4497 std::advance(ITD, 1); 4498 } 4499 } 4500 // Privatize in_reduction items here, because taskgroup descriptors must be 4501 // privatized earlier. 4502 OMPPrivateScope InRedScope(CGF); 4503 if (!InRedVars.empty()) { 4504 ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps); 4505 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) { 4506 RedCG.emitSharedOrigLValue(CGF, Cnt); 4507 RedCG.emitAggregateType(CGF, Cnt); 4508 // The taskgroup descriptor variable is always implicit firstprivate and 4509 // privatized already during processing of the firstprivates. 4510 // FIXME: This must removed once the runtime library is fixed. 4511 // Emit required threadprivate variables for 4512 // initializer/combiner/finalizer. 4513 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 4514 RedCG, Cnt); 4515 llvm::Value *ReductionsPtr; 4516 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) { 4517 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), 4518 TRExpr->getExprLoc()); 4519 } else { 4520 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); 4521 } 4522 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 4523 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 4524 Replacement = Address( 4525 CGF.EmitScalarConversion( 4526 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 4527 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 4528 InRedPrivs[Cnt]->getExprLoc()), 4529 Replacement.getAlignment()); 4530 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 4531 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), 4532 [Replacement]() { return Replacement; }); 4533 } 4534 } 4535 (void)InRedScope.Privatize(); 4536 4537 CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF, 4538 UntiedLocalVars); 4539 Action.Enter(CGF); 4540 BodyGen(CGF); 4541 }; 4542 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4543 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 4544 Data.NumberOfParts); 4545 OMPLexicalScope Scope(*this, S, llvm::None, 4546 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4547 !isOpenMPSimdDirective(S.getDirectiveKind())); 4548 TaskGen(*this, OutlinedFn, Data); 4549 } 4550 4551 static ImplicitParamDecl * 4552 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 4553 QualType Ty, CapturedDecl *CD, 4554 SourceLocation Loc) { 4555 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 4556 ImplicitParamDecl::Other); 4557 auto *OrigRef = DeclRefExpr::Create( 4558 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 4559 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 4560 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 4561 ImplicitParamDecl::Other); 4562 auto *PrivateRef = DeclRefExpr::Create( 4563 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 4564 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 4565 QualType ElemType = C.getBaseElementType(Ty); 4566 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 4567 ImplicitParamDecl::Other); 4568 auto *InitRef = DeclRefExpr::Create( 4569 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 4570 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 4571 PrivateVD->setInitStyle(VarDecl::CInit); 4572 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 4573 InitRef, /*BasePath=*/nullptr, 4574 VK_PRValue, FPOptionsOverride())); 4575 Data.FirstprivateVars.emplace_back(OrigRef); 4576 Data.FirstprivateCopies.emplace_back(PrivateRef); 4577 Data.FirstprivateInits.emplace_back(InitRef); 4578 return OrigVD; 4579 } 4580 4581 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 4582 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 4583 OMPTargetDataInfo &InputInfo) { 4584 // Emit outlined function for task construct. 4585 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4586 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4587 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4588 auto I = CS->getCapturedDecl()->param_begin(); 4589 auto PartId = std::next(I); 4590 auto TaskT = std::next(I, 4); 4591 OMPTaskDataTy Data; 4592 // The task is not final. 4593 Data.Final.setInt(/*IntVal=*/false); 4594 // Get list of firstprivate variables. 4595 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4596 auto IRef = C->varlist_begin(); 4597 auto IElemInitRef = C->inits().begin(); 4598 for (auto *IInit : C->private_copies()) { 4599 Data.FirstprivateVars.push_back(*IRef); 4600 Data.FirstprivateCopies.push_back(IInit); 4601 Data.FirstprivateInits.push_back(*IElemInitRef); 4602 ++IRef; 4603 ++IElemInitRef; 4604 } 4605 } 4606 OMPPrivateScope TargetScope(*this); 4607 VarDecl *BPVD = nullptr; 4608 VarDecl *PVD = nullptr; 4609 VarDecl *SVD = nullptr; 4610 VarDecl *MVD = nullptr; 4611 if (InputInfo.NumberOfTargetItems > 0) { 4612 auto *CD = CapturedDecl::Create( 4613 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4614 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4615 QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType( 4616 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4617 /*IndexTypeQuals=*/0); 4618 BPVD = createImplicitFirstprivateForType( 4619 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4620 PVD = createImplicitFirstprivateForType( 4621 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4622 QualType SizesType = getContext().getConstantArrayType( 4623 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4624 ArrSize, nullptr, ArrayType::Normal, 4625 /*IndexTypeQuals=*/0); 4626 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4627 S.getBeginLoc()); 4628 TargetScope.addPrivate( 4629 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 4630 TargetScope.addPrivate(PVD, 4631 [&InputInfo]() { return InputInfo.PointersArray; }); 4632 TargetScope.addPrivate(SVD, 4633 [&InputInfo]() { return InputInfo.SizesArray; }); 4634 // If there is no user-defined mapper, the mapper array will be nullptr. In 4635 // this case, we don't need to privatize it. 4636 if (!dyn_cast_or_null<llvm::ConstantPointerNull>( 4637 InputInfo.MappersArray.getPointer())) { 4638 MVD = createImplicitFirstprivateForType( 4639 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4640 TargetScope.addPrivate(MVD, 4641 [&InputInfo]() { return InputInfo.MappersArray; }); 4642 } 4643 } 4644 (void)TargetScope.Privatize(); 4645 // Build list of dependences. 4646 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4647 OMPTaskDataTy::DependData &DD = 4648 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4649 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4650 } 4651 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD, 4652 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4653 // Set proper addresses for generated private copies. 4654 OMPPrivateScope Scope(CGF); 4655 if (!Data.FirstprivateVars.empty()) { 4656 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4657 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4658 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4659 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4660 CS->getCapturedDecl()->getParam(PrivatesParam))); 4661 // Map privates. 4662 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4663 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4664 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4665 CallArgs.push_back(PrivatesPtr); 4666 ParamTypes.push_back(PrivatesPtr->getType()); 4667 for (const Expr *E : Data.FirstprivateVars) { 4668 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4669 Address PrivatePtr = 4670 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4671 ".firstpriv.ptr.addr"); 4672 PrivatePtrs.emplace_back(VD, PrivatePtr); 4673 CallArgs.push_back(PrivatePtr.getPointer()); 4674 ParamTypes.push_back(PrivatePtr.getType()); 4675 } 4676 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4677 ParamTypes, /*isVarArg=*/false); 4678 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4679 CopyFn, CopyFnTy->getPointerTo()); 4680 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4681 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4682 for (const auto &Pair : PrivatePtrs) { 4683 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4684 CGF.getContext().getDeclAlign(Pair.first)); 4685 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4686 } 4687 } 4688 // Privatize all private variables except for in_reduction items. 4689 (void)Scope.Privatize(); 4690 if (InputInfo.NumberOfTargetItems > 0) { 4691 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4692 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4693 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4694 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4695 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4696 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4697 // If MVD is nullptr, the mapper array is not privatized 4698 if (MVD) 4699 InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP( 4700 CGF.GetAddrOfLocalVar(MVD), /*Index=*/0); 4701 } 4702 4703 Action.Enter(CGF); 4704 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4705 BodyGen(CGF); 4706 }; 4707 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4708 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4709 Data.NumberOfParts); 4710 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4711 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4712 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4713 SourceLocation()); 4714 4715 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4716 SharedsTy, CapturedStruct, &IfCond, Data); 4717 } 4718 4719 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4720 // Emit outlined function for task construct. 4721 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4722 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4723 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4724 const Expr *IfCond = nullptr; 4725 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4726 if (C->getNameModifier() == OMPD_unknown || 4727 C->getNameModifier() == OMPD_task) { 4728 IfCond = C->getCondition(); 4729 break; 4730 } 4731 } 4732 4733 OMPTaskDataTy Data; 4734 // Check if we should emit tied or untied task. 4735 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4736 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4737 CGF.EmitStmt(CS->getCapturedStmt()); 4738 }; 4739 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4740 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4741 const OMPTaskDataTy &Data) { 4742 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4743 SharedsTy, CapturedStruct, IfCond, 4744 Data); 4745 }; 4746 auto LPCRegion = 4747 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4748 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4749 } 4750 4751 void CodeGenFunction::EmitOMPTaskyieldDirective( 4752 const OMPTaskyieldDirective &S) { 4753 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4754 } 4755 4756 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4757 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4758 } 4759 4760 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4761 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 4762 } 4763 4764 void CodeGenFunction::EmitOMPTaskgroupDirective( 4765 const OMPTaskgroupDirective &S) { 4766 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4767 Action.Enter(CGF); 4768 if (const Expr *E = S.getReductionRef()) { 4769 SmallVector<const Expr *, 4> LHSs; 4770 SmallVector<const Expr *, 4> RHSs; 4771 OMPTaskDataTy Data; 4772 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 4773 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4774 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4775 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4776 Data.ReductionOps.append(C->reduction_ops().begin(), 4777 C->reduction_ops().end()); 4778 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4779 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4780 } 4781 llvm::Value *ReductionDesc = 4782 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 4783 LHSs, RHSs, Data); 4784 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4785 CGF.EmitVarDecl(*VD); 4786 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 4787 /*Volatile=*/false, E->getType()); 4788 } 4789 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4790 }; 4791 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4792 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 4793 } 4794 4795 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 4796 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 4797 ? llvm::AtomicOrdering::NotAtomic 4798 : llvm::AtomicOrdering::AcquireRelease; 4799 CGM.getOpenMPRuntime().emitFlush( 4800 *this, 4801 [&S]() -> ArrayRef<const Expr *> { 4802 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 4803 return llvm::makeArrayRef(FlushClause->varlist_begin(), 4804 FlushClause->varlist_end()); 4805 return llvm::None; 4806 }(), 4807 S.getBeginLoc(), AO); 4808 } 4809 4810 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 4811 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 4812 LValue DOLVal = EmitLValue(DO->getDepobj()); 4813 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 4814 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 4815 DC->getModifier()); 4816 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 4817 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 4818 *this, Dependencies, DC->getBeginLoc()); 4819 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 4820 return; 4821 } 4822 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 4823 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 4824 return; 4825 } 4826 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 4827 CGM.getOpenMPRuntime().emitUpdateClause( 4828 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 4829 return; 4830 } 4831 } 4832 4833 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 4834 if (!OMPParentLoopDirectiveForScan) 4835 return; 4836 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 4837 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 4838 SmallVector<const Expr *, 4> Shareds; 4839 SmallVector<const Expr *, 4> Privates; 4840 SmallVector<const Expr *, 4> LHSs; 4841 SmallVector<const Expr *, 4> RHSs; 4842 SmallVector<const Expr *, 4> ReductionOps; 4843 SmallVector<const Expr *, 4> CopyOps; 4844 SmallVector<const Expr *, 4> CopyArrayTemps; 4845 SmallVector<const Expr *, 4> CopyArrayElems; 4846 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 4847 if (C->getModifier() != OMPC_REDUCTION_inscan) 4848 continue; 4849 Shareds.append(C->varlist_begin(), C->varlist_end()); 4850 Privates.append(C->privates().begin(), C->privates().end()); 4851 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4852 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4853 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 4854 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 4855 CopyArrayTemps.append(C->copy_array_temps().begin(), 4856 C->copy_array_temps().end()); 4857 CopyArrayElems.append(C->copy_array_elems().begin(), 4858 C->copy_array_elems().end()); 4859 } 4860 if (ParentDir.getDirectiveKind() == OMPD_simd || 4861 (getLangOpts().OpenMPSimd && 4862 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 4863 // For simd directive and simd-based directives in simd only mode, use the 4864 // following codegen: 4865 // int x = 0; 4866 // #pragma omp simd reduction(inscan, +: x) 4867 // for (..) { 4868 // <first part> 4869 // #pragma omp scan inclusive(x) 4870 // <second part> 4871 // } 4872 // is transformed to: 4873 // int x = 0; 4874 // for (..) { 4875 // int x_priv = 0; 4876 // <first part> 4877 // x = x_priv + x; 4878 // x_priv = x; 4879 // <second part> 4880 // } 4881 // and 4882 // int x = 0; 4883 // #pragma omp simd reduction(inscan, +: x) 4884 // for (..) { 4885 // <first part> 4886 // #pragma omp scan exclusive(x) 4887 // <second part> 4888 // } 4889 // to 4890 // int x = 0; 4891 // for (..) { 4892 // int x_priv = 0; 4893 // <second part> 4894 // int temp = x; 4895 // x = x_priv + x; 4896 // x_priv = temp; 4897 // <first part> 4898 // } 4899 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 4900 EmitBranch(IsInclusive 4901 ? OMPScanReduce 4902 : BreakContinueStack.back().ContinueBlock.getBlock()); 4903 EmitBlock(OMPScanDispatch); 4904 { 4905 // New scope for correct construction/destruction of temp variables for 4906 // exclusive scan. 4907 LexicalScope Scope(*this, S.getSourceRange()); 4908 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 4909 EmitBlock(OMPScanReduce); 4910 if (!IsInclusive) { 4911 // Create temp var and copy LHS value to this temp value. 4912 // TMP = LHS; 4913 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4914 const Expr *PrivateExpr = Privates[I]; 4915 const Expr *TempExpr = CopyArrayTemps[I]; 4916 EmitAutoVarDecl( 4917 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 4918 LValue DestLVal = EmitLValue(TempExpr); 4919 LValue SrcLVal = EmitLValue(LHSs[I]); 4920 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4921 SrcLVal.getAddress(*this), 4922 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4923 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4924 CopyOps[I]); 4925 } 4926 } 4927 CGM.getOpenMPRuntime().emitReduction( 4928 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 4929 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 4930 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4931 const Expr *PrivateExpr = Privates[I]; 4932 LValue DestLVal; 4933 LValue SrcLVal; 4934 if (IsInclusive) { 4935 DestLVal = EmitLValue(RHSs[I]); 4936 SrcLVal = EmitLValue(LHSs[I]); 4937 } else { 4938 const Expr *TempExpr = CopyArrayTemps[I]; 4939 DestLVal = EmitLValue(RHSs[I]); 4940 SrcLVal = EmitLValue(TempExpr); 4941 } 4942 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4943 SrcLVal.getAddress(*this), 4944 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4945 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4946 CopyOps[I]); 4947 } 4948 } 4949 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 4950 OMPScanExitBlock = IsInclusive 4951 ? BreakContinueStack.back().ContinueBlock.getBlock() 4952 : OMPScanReduce; 4953 EmitBlock(OMPAfterScanBlock); 4954 return; 4955 } 4956 if (!IsInclusive) { 4957 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4958 EmitBlock(OMPScanExitBlock); 4959 } 4960 if (OMPFirstScanLoop) { 4961 // Emit buffer[i] = red; at the end of the input phase. 4962 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4963 .getIterationVariable() 4964 ->IgnoreParenImpCasts(); 4965 LValue IdxLVal = EmitLValue(IVExpr); 4966 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4967 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4968 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4969 const Expr *PrivateExpr = Privates[I]; 4970 const Expr *OrigExpr = Shareds[I]; 4971 const Expr *CopyArrayElem = CopyArrayElems[I]; 4972 OpaqueValueMapping IdxMapping( 4973 *this, 4974 cast<OpaqueValueExpr>( 4975 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4976 RValue::get(IdxVal)); 4977 LValue DestLVal = EmitLValue(CopyArrayElem); 4978 LValue SrcLVal = EmitLValue(OrigExpr); 4979 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4980 SrcLVal.getAddress(*this), 4981 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4982 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4983 CopyOps[I]); 4984 } 4985 } 4986 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4987 if (IsInclusive) { 4988 EmitBlock(OMPScanExitBlock); 4989 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4990 } 4991 EmitBlock(OMPScanDispatch); 4992 if (!OMPFirstScanLoop) { 4993 // Emit red = buffer[i]; at the entrance to the scan phase. 4994 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4995 .getIterationVariable() 4996 ->IgnoreParenImpCasts(); 4997 LValue IdxLVal = EmitLValue(IVExpr); 4998 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4999 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 5000 llvm::BasicBlock *ExclusiveExitBB = nullptr; 5001 if (!IsInclusive) { 5002 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 5003 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 5004 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 5005 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 5006 EmitBlock(ContBB); 5007 // Use idx - 1 iteration for exclusive scan. 5008 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 5009 } 5010 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 5011 const Expr *PrivateExpr = Privates[I]; 5012 const Expr *OrigExpr = Shareds[I]; 5013 const Expr *CopyArrayElem = CopyArrayElems[I]; 5014 OpaqueValueMapping IdxMapping( 5015 *this, 5016 cast<OpaqueValueExpr>( 5017 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 5018 RValue::get(IdxVal)); 5019 LValue SrcLVal = EmitLValue(CopyArrayElem); 5020 LValue DestLVal = EmitLValue(OrigExpr); 5021 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 5022 SrcLVal.getAddress(*this), 5023 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 5024 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 5025 CopyOps[I]); 5026 } 5027 if (!IsInclusive) { 5028 EmitBlock(ExclusiveExitBB); 5029 } 5030 } 5031 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 5032 : OMPAfterScanBlock); 5033 EmitBlock(OMPAfterScanBlock); 5034 } 5035 5036 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 5037 const CodeGenLoopTy &CodeGenLoop, 5038 Expr *IncExpr) { 5039 // Emit the loop iteration variable. 5040 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 5041 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 5042 EmitVarDecl(*IVDecl); 5043 5044 // Emit the iterations count variable. 5045 // If it is not a variable, Sema decided to calculate iterations count on each 5046 // iteration (e.g., it is foldable into a constant). 5047 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 5048 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 5049 // Emit calculation of the iterations count. 5050 EmitIgnoredExpr(S.getCalcLastIteration()); 5051 } 5052 5053 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 5054 5055 bool HasLastprivateClause = false; 5056 // Check pre-condition. 5057 { 5058 OMPLoopScope PreInitScope(*this, S); 5059 // Skip the entire loop if we don't meet the precondition. 5060 // If the condition constant folds and can be elided, avoid emitting the 5061 // whole loop. 5062 bool CondConstant; 5063 llvm::BasicBlock *ContBlock = nullptr; 5064 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 5065 if (!CondConstant) 5066 return; 5067 } else { 5068 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 5069 ContBlock = createBasicBlock("omp.precond.end"); 5070 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 5071 getProfileCount(&S)); 5072 EmitBlock(ThenBlock); 5073 incrementProfileCounter(&S); 5074 } 5075 5076 emitAlignedClause(*this, S); 5077 // Emit 'then' code. 5078 { 5079 // Emit helper vars inits. 5080 5081 LValue LB = EmitOMPHelperVar( 5082 *this, cast<DeclRefExpr>( 5083 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5084 ? S.getCombinedLowerBoundVariable() 5085 : S.getLowerBoundVariable()))); 5086 LValue UB = EmitOMPHelperVar( 5087 *this, cast<DeclRefExpr>( 5088 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5089 ? S.getCombinedUpperBoundVariable() 5090 : S.getUpperBoundVariable()))); 5091 LValue ST = 5092 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 5093 LValue IL = 5094 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 5095 5096 OMPPrivateScope LoopScope(*this); 5097 if (EmitOMPFirstprivateClause(S, LoopScope)) { 5098 // Emit implicit barrier to synchronize threads and avoid data races 5099 // on initialization of firstprivate variables and post-update of 5100 // lastprivate variables. 5101 CGM.getOpenMPRuntime().emitBarrierCall( 5102 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 5103 /*ForceSimpleCall=*/true); 5104 } 5105 EmitOMPPrivateClause(S, LoopScope); 5106 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5107 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5108 !isOpenMPTeamsDirective(S.getDirectiveKind())) 5109 EmitOMPReductionClauseInit(S, LoopScope); 5110 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 5111 EmitOMPPrivateLoopCounters(S, LoopScope); 5112 (void)LoopScope.Privatize(); 5113 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5114 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 5115 5116 // Detect the distribute schedule kind and chunk. 5117 llvm::Value *Chunk = nullptr; 5118 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 5119 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 5120 ScheduleKind = C->getDistScheduleKind(); 5121 if (const Expr *Ch = C->getChunkSize()) { 5122 Chunk = EmitScalarExpr(Ch); 5123 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 5124 S.getIterationVariable()->getType(), 5125 S.getBeginLoc()); 5126 } 5127 } else { 5128 // Default behaviour for dist_schedule clause. 5129 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 5130 *this, S, ScheduleKind, Chunk); 5131 } 5132 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 5133 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 5134 5135 // OpenMP [2.10.8, distribute Construct, Description] 5136 // If dist_schedule is specified, kind must be static. If specified, 5137 // iterations are divided into chunks of size chunk_size, chunks are 5138 // assigned to the teams of the league in a round-robin fashion in the 5139 // order of the team number. When no chunk_size is specified, the 5140 // iteration space is divided into chunks that are approximately equal 5141 // in size, and at most one chunk is distributed to each team of the 5142 // league. The size of the chunks is unspecified in this case. 5143 bool StaticChunked = RT.isStaticChunked( 5144 ScheduleKind, /* Chunked */ Chunk != nullptr) && 5145 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 5146 if (RT.isStaticNonchunked(ScheduleKind, 5147 /* Chunked */ Chunk != nullptr) || 5148 StaticChunked) { 5149 CGOpenMPRuntime::StaticRTInput StaticInit( 5150 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 5151 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5152 StaticChunked ? Chunk : nullptr); 5153 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 5154 StaticInit); 5155 JumpDest LoopExit = 5156 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 5157 // UB = min(UB, GlobalUB); 5158 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5159 ? S.getCombinedEnsureUpperBound() 5160 : S.getEnsureUpperBound()); 5161 // IV = LB; 5162 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5163 ? S.getCombinedInit() 5164 : S.getInit()); 5165 5166 const Expr *Cond = 5167 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5168 ? S.getCombinedCond() 5169 : S.getCond(); 5170 5171 if (StaticChunked) 5172 Cond = S.getCombinedDistCond(); 5173 5174 // For static unchunked schedules generate: 5175 // 5176 // 1. For distribute alone, codegen 5177 // while (idx <= UB) { 5178 // BODY; 5179 // ++idx; 5180 // } 5181 // 5182 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 5183 // while (idx <= UB) { 5184 // <CodeGen rest of pragma>(LB, UB); 5185 // idx += ST; 5186 // } 5187 // 5188 // For static chunk one schedule generate: 5189 // 5190 // while (IV <= GlobalUB) { 5191 // <CodeGen rest of pragma>(LB, UB); 5192 // LB += ST; 5193 // UB += ST; 5194 // UB = min(UB, GlobalUB); 5195 // IV = LB; 5196 // } 5197 // 5198 emitCommonSimdLoop( 5199 *this, S, 5200 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5201 if (isOpenMPSimdDirective(S.getDirectiveKind())) 5202 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 5203 }, 5204 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 5205 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 5206 CGF.EmitOMPInnerLoop( 5207 S, LoopScope.requiresCleanups(), Cond, IncExpr, 5208 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 5209 CodeGenLoop(CGF, S, LoopExit); 5210 }, 5211 [&S, StaticChunked](CodeGenFunction &CGF) { 5212 if (StaticChunked) { 5213 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 5214 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 5215 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 5216 CGF.EmitIgnoredExpr(S.getCombinedInit()); 5217 } 5218 }); 5219 }); 5220 EmitBlock(LoopExit.getBlock()); 5221 // Tell the runtime we are done. 5222 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 5223 } else { 5224 // Emit the outer loop, which requests its work chunk [LB..UB] from 5225 // runtime and runs the inner loop to process it. 5226 const OMPLoopArguments LoopArguments = { 5227 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5228 IL.getAddress(*this), Chunk}; 5229 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 5230 CodeGenLoop); 5231 } 5232 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 5233 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 5234 return CGF.Builder.CreateIsNotNull( 5235 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5236 }); 5237 } 5238 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5239 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5240 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 5241 EmitOMPReductionClauseFinal(S, OMPD_simd); 5242 // Emit post-update of the reduction variables if IsLastIter != 0. 5243 emitPostUpdateForReductionClause( 5244 *this, S, [IL, &S](CodeGenFunction &CGF) { 5245 return CGF.Builder.CreateIsNotNull( 5246 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5247 }); 5248 } 5249 // Emit final copy of the lastprivate variables if IsLastIter != 0. 5250 if (HasLastprivateClause) { 5251 EmitOMPLastprivateClauseFinal( 5252 S, /*NoFinals=*/false, 5253 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 5254 } 5255 } 5256 5257 // We're now done with the loop, so jump to the continuation block. 5258 if (ContBlock) { 5259 EmitBranch(ContBlock); 5260 EmitBlock(ContBlock, true); 5261 } 5262 } 5263 } 5264 5265 void CodeGenFunction::EmitOMPDistributeDirective( 5266 const OMPDistributeDirective &S) { 5267 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5268 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5269 }; 5270 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5271 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 5272 } 5273 5274 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 5275 const CapturedStmt *S, 5276 SourceLocation Loc) { 5277 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 5278 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 5279 CGF.CapturedStmtInfo = &CapStmtInfo; 5280 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 5281 Fn->setDoesNotRecurse(); 5282 return Fn; 5283 } 5284 5285 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 5286 if (S.hasClausesOfKind<OMPDependClause>()) { 5287 assert(!S.hasAssociatedStmt() && 5288 "No associated statement must be in ordered depend construct."); 5289 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 5290 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 5291 return; 5292 } 5293 const auto *C = S.getSingleClause<OMPSIMDClause>(); 5294 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 5295 PrePostActionTy &Action) { 5296 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 5297 if (C) { 5298 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5299 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5300 llvm::Function *OutlinedFn = 5301 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 5302 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 5303 OutlinedFn, CapturedVars); 5304 } else { 5305 Action.Enter(CGF); 5306 CGF.EmitStmt(CS->getCapturedStmt()); 5307 } 5308 }; 5309 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5310 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C); 5311 } 5312 5313 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 5314 QualType SrcType, QualType DestType, 5315 SourceLocation Loc) { 5316 assert(CGF.hasScalarEvaluationKind(DestType) && 5317 "DestType must have scalar evaluation kind."); 5318 assert(!Val.isAggregate() && "Must be a scalar or complex."); 5319 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 5320 DestType, Loc) 5321 : CGF.EmitComplexToScalarConversion( 5322 Val.getComplexVal(), SrcType, DestType, Loc); 5323 } 5324 5325 static CodeGenFunction::ComplexPairTy 5326 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 5327 QualType DestType, SourceLocation Loc) { 5328 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 5329 "DestType must have complex evaluation kind."); 5330 CodeGenFunction::ComplexPairTy ComplexVal; 5331 if (Val.isScalar()) { 5332 // Convert the input element to the element type of the complex. 5333 QualType DestElementType = 5334 DestType->castAs<ComplexType>()->getElementType(); 5335 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 5336 Val.getScalarVal(), SrcType, DestElementType, Loc); 5337 ComplexVal = CodeGenFunction::ComplexPairTy( 5338 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 5339 } else { 5340 assert(Val.isComplex() && "Must be a scalar or complex."); 5341 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 5342 QualType DestElementType = 5343 DestType->castAs<ComplexType>()->getElementType(); 5344 ComplexVal.first = CGF.EmitScalarConversion( 5345 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 5346 ComplexVal.second = CGF.EmitScalarConversion( 5347 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 5348 } 5349 return ComplexVal; 5350 } 5351 5352 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5353 LValue LVal, RValue RVal) { 5354 if (LVal.isGlobalReg()) 5355 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 5356 else 5357 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 5358 } 5359 5360 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 5361 llvm::AtomicOrdering AO, LValue LVal, 5362 SourceLocation Loc) { 5363 if (LVal.isGlobalReg()) 5364 return CGF.EmitLoadOfLValue(LVal, Loc); 5365 return CGF.EmitAtomicLoad( 5366 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 5367 LVal.isVolatile()); 5368 } 5369 5370 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 5371 QualType RValTy, SourceLocation Loc) { 5372 switch (getEvaluationKind(LVal.getType())) { 5373 case TEK_Scalar: 5374 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 5375 *this, RVal, RValTy, LVal.getType(), Loc)), 5376 LVal); 5377 break; 5378 case TEK_Complex: 5379 EmitStoreOfComplex( 5380 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 5381 /*isInit=*/false); 5382 break; 5383 case TEK_Aggregate: 5384 llvm_unreachable("Must be a scalar or complex."); 5385 } 5386 } 5387 5388 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5389 const Expr *X, const Expr *V, 5390 SourceLocation Loc) { 5391 // v = x; 5392 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 5393 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 5394 LValue XLValue = CGF.EmitLValue(X); 5395 LValue VLValue = CGF.EmitLValue(V); 5396 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 5397 // OpenMP, 2.17.7, atomic Construct 5398 // If the read or capture clause is specified and the acquire, acq_rel, or 5399 // seq_cst clause is specified then the strong flush on exit from the atomic 5400 // operation is also an acquire flush. 5401 switch (AO) { 5402 case llvm::AtomicOrdering::Acquire: 5403 case llvm::AtomicOrdering::AcquireRelease: 5404 case llvm::AtomicOrdering::SequentiallyConsistent: 5405 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5406 llvm::AtomicOrdering::Acquire); 5407 break; 5408 case llvm::AtomicOrdering::Monotonic: 5409 case llvm::AtomicOrdering::Release: 5410 break; 5411 case llvm::AtomicOrdering::NotAtomic: 5412 case llvm::AtomicOrdering::Unordered: 5413 llvm_unreachable("Unexpected ordering."); 5414 } 5415 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 5416 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5417 } 5418 5419 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, 5420 llvm::AtomicOrdering AO, const Expr *X, 5421 const Expr *E, SourceLocation Loc) { 5422 // x = expr; 5423 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 5424 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 5425 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5426 // OpenMP, 2.17.7, atomic Construct 5427 // If the write, update, or capture clause is specified and the release, 5428 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5429 // the atomic operation is also a release flush. 5430 switch (AO) { 5431 case llvm::AtomicOrdering::Release: 5432 case llvm::AtomicOrdering::AcquireRelease: 5433 case llvm::AtomicOrdering::SequentiallyConsistent: 5434 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5435 llvm::AtomicOrdering::Release); 5436 break; 5437 case llvm::AtomicOrdering::Acquire: 5438 case llvm::AtomicOrdering::Monotonic: 5439 break; 5440 case llvm::AtomicOrdering::NotAtomic: 5441 case llvm::AtomicOrdering::Unordered: 5442 llvm_unreachable("Unexpected ordering."); 5443 } 5444 } 5445 5446 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 5447 RValue Update, 5448 BinaryOperatorKind BO, 5449 llvm::AtomicOrdering AO, 5450 bool IsXLHSInRHSPart) { 5451 ASTContext &Context = CGF.getContext(); 5452 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 5453 // expression is simple and atomic is allowed for the given type for the 5454 // target platform. 5455 if (BO == BO_Comma || !Update.isScalar() || 5456 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() || 5457 (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 5458 (Update.getScalarVal()->getType() != 5459 X.getAddress(CGF).getElementType())) || 5460 !X.getAddress(CGF).getElementType()->isIntegerTy() || 5461 !Context.getTargetInfo().hasBuiltinAtomic( 5462 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 5463 return std::make_pair(false, RValue::get(nullptr)); 5464 5465 llvm::AtomicRMWInst::BinOp RMWOp; 5466 switch (BO) { 5467 case BO_Add: 5468 RMWOp = llvm::AtomicRMWInst::Add; 5469 break; 5470 case BO_Sub: 5471 if (!IsXLHSInRHSPart) 5472 return std::make_pair(false, RValue::get(nullptr)); 5473 RMWOp = llvm::AtomicRMWInst::Sub; 5474 break; 5475 case BO_And: 5476 RMWOp = llvm::AtomicRMWInst::And; 5477 break; 5478 case BO_Or: 5479 RMWOp = llvm::AtomicRMWInst::Or; 5480 break; 5481 case BO_Xor: 5482 RMWOp = llvm::AtomicRMWInst::Xor; 5483 break; 5484 case BO_LT: 5485 RMWOp = X.getType()->hasSignedIntegerRepresentation() 5486 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 5487 : llvm::AtomicRMWInst::Max) 5488 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 5489 : llvm::AtomicRMWInst::UMax); 5490 break; 5491 case BO_GT: 5492 RMWOp = X.getType()->hasSignedIntegerRepresentation() 5493 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 5494 : llvm::AtomicRMWInst::Min) 5495 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax 5496 : llvm::AtomicRMWInst::UMin); 5497 break; 5498 case BO_Assign: 5499 RMWOp = llvm::AtomicRMWInst::Xchg; 5500 break; 5501 case BO_Mul: 5502 case BO_Div: 5503 case BO_Rem: 5504 case BO_Shl: 5505 case BO_Shr: 5506 case BO_LAnd: 5507 case BO_LOr: 5508 return std::make_pair(false, RValue::get(nullptr)); 5509 case BO_PtrMemD: 5510 case BO_PtrMemI: 5511 case BO_LE: 5512 case BO_GE: 5513 case BO_EQ: 5514 case BO_NE: 5515 case BO_Cmp: 5516 case BO_AddAssign: 5517 case BO_SubAssign: 5518 case BO_AndAssign: 5519 case BO_OrAssign: 5520 case BO_XorAssign: 5521 case BO_MulAssign: 5522 case BO_DivAssign: 5523 case BO_RemAssign: 5524 case BO_ShlAssign: 5525 case BO_ShrAssign: 5526 case BO_Comma: 5527 llvm_unreachable("Unsupported atomic update operation"); 5528 } 5529 llvm::Value *UpdateVal = Update.getScalarVal(); 5530 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 5531 UpdateVal = CGF.Builder.CreateIntCast( 5532 IC, X.getAddress(CGF).getElementType(), 5533 X.getType()->hasSignedIntegerRepresentation()); 5534 } 5535 llvm::Value *Res = 5536 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 5537 return std::make_pair(true, RValue::get(Res)); 5538 } 5539 5540 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 5541 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 5542 llvm::AtomicOrdering AO, SourceLocation Loc, 5543 const llvm::function_ref<RValue(RValue)> CommonGen) { 5544 // Update expressions are allowed to have the following forms: 5545 // x binop= expr; -> xrval + expr; 5546 // x++, ++x -> xrval + 1; 5547 // x--, --x -> xrval - 1; 5548 // x = x binop expr; -> xrval binop expr 5549 // x = expr Op x; - > expr binop xrval; 5550 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 5551 if (!Res.first) { 5552 if (X.isGlobalReg()) { 5553 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 5554 // 'xrval'. 5555 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 5556 } else { 5557 // Perform compare-and-swap procedure. 5558 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 5559 } 5560 } 5561 return Res; 5562 } 5563 5564 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, 5565 llvm::AtomicOrdering AO, const Expr *X, 5566 const Expr *E, const Expr *UE, 5567 bool IsXLHSInRHSPart, SourceLocation Loc) { 5568 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5569 "Update expr in 'atomic update' must be a binary operator."); 5570 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5571 // Update expressions are allowed to have the following forms: 5572 // x binop= expr; -> xrval + expr; 5573 // x++, ++x -> xrval + 1; 5574 // x--, --x -> xrval - 1; 5575 // x = x binop expr; -> xrval binop expr 5576 // x = expr Op x; - > expr binop xrval; 5577 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 5578 LValue XLValue = CGF.EmitLValue(X); 5579 RValue ExprRValue = CGF.EmitAnyExpr(E); 5580 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5581 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5582 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5583 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5584 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 5585 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5586 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5587 return CGF.EmitAnyExpr(UE); 5588 }; 5589 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 5590 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5591 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5592 // OpenMP, 2.17.7, atomic Construct 5593 // If the write, update, or capture clause is specified and the release, 5594 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5595 // the atomic operation is also a release flush. 5596 switch (AO) { 5597 case llvm::AtomicOrdering::Release: 5598 case llvm::AtomicOrdering::AcquireRelease: 5599 case llvm::AtomicOrdering::SequentiallyConsistent: 5600 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5601 llvm::AtomicOrdering::Release); 5602 break; 5603 case llvm::AtomicOrdering::Acquire: 5604 case llvm::AtomicOrdering::Monotonic: 5605 break; 5606 case llvm::AtomicOrdering::NotAtomic: 5607 case llvm::AtomicOrdering::Unordered: 5608 llvm_unreachable("Unexpected ordering."); 5609 } 5610 } 5611 5612 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 5613 QualType SourceType, QualType ResType, 5614 SourceLocation Loc) { 5615 switch (CGF.getEvaluationKind(ResType)) { 5616 case TEK_Scalar: 5617 return RValue::get( 5618 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 5619 case TEK_Complex: { 5620 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 5621 return RValue::getComplex(Res.first, Res.second); 5622 } 5623 case TEK_Aggregate: 5624 break; 5625 } 5626 llvm_unreachable("Must be a scalar or complex."); 5627 } 5628 5629 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, 5630 llvm::AtomicOrdering AO, 5631 bool IsPostfixUpdate, const Expr *V, 5632 const Expr *X, const Expr *E, 5633 const Expr *UE, bool IsXLHSInRHSPart, 5634 SourceLocation Loc) { 5635 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 5636 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 5637 RValue NewVVal; 5638 LValue VLValue = CGF.EmitLValue(V); 5639 LValue XLValue = CGF.EmitLValue(X); 5640 RValue ExprRValue = CGF.EmitAnyExpr(E); 5641 QualType NewVValType; 5642 if (UE) { 5643 // 'x' is updated with some additional value. 5644 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5645 "Update expr in 'atomic capture' must be a binary operator."); 5646 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5647 // Update expressions are allowed to have the following forms: 5648 // x binop= expr; -> xrval + expr; 5649 // x++, ++x -> xrval + 1; 5650 // x--, --x -> xrval - 1; 5651 // x = x binop expr; -> xrval binop expr 5652 // x = expr Op x; - > expr binop xrval; 5653 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5654 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5655 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5656 NewVValType = XRValExpr->getType(); 5657 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5658 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 5659 IsPostfixUpdate](RValue XRValue) { 5660 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5661 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5662 RValue Res = CGF.EmitAnyExpr(UE); 5663 NewVVal = IsPostfixUpdate ? XRValue : Res; 5664 return Res; 5665 }; 5666 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5667 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5668 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5669 if (Res.first) { 5670 // 'atomicrmw' instruction was generated. 5671 if (IsPostfixUpdate) { 5672 // Use old value from 'atomicrmw'. 5673 NewVVal = Res.second; 5674 } else { 5675 // 'atomicrmw' does not provide new value, so evaluate it using old 5676 // value of 'x'. 5677 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5678 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5679 NewVVal = CGF.EmitAnyExpr(UE); 5680 } 5681 } 5682 } else { 5683 // 'x' is simply rewritten with some 'expr'. 5684 NewVValType = X->getType().getNonReferenceType(); 5685 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5686 X->getType().getNonReferenceType(), Loc); 5687 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5688 NewVVal = XRValue; 5689 return ExprRValue; 5690 }; 5691 // Try to perform atomicrmw xchg, otherwise simple exchange. 5692 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5693 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5694 Loc, Gen); 5695 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5696 if (Res.first) { 5697 // 'atomicrmw' instruction was generated. 5698 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 5699 } 5700 } 5701 // Emit post-update store to 'v' of old/new 'x' value. 5702 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 5703 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5704 // OpenMP, 2.17.7, atomic Construct 5705 // If the write, update, or capture clause is specified and the release, 5706 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5707 // the atomic operation is also a release flush. 5708 // If the read or capture clause is specified and the acquire, acq_rel, or 5709 // seq_cst clause is specified then the strong flush on exit from the atomic 5710 // operation is also an acquire flush. 5711 switch (AO) { 5712 case llvm::AtomicOrdering::Release: 5713 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5714 llvm::AtomicOrdering::Release); 5715 break; 5716 case llvm::AtomicOrdering::Acquire: 5717 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5718 llvm::AtomicOrdering::Acquire); 5719 break; 5720 case llvm::AtomicOrdering::AcquireRelease: 5721 case llvm::AtomicOrdering::SequentiallyConsistent: 5722 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5723 llvm::AtomicOrdering::AcquireRelease); 5724 break; 5725 case llvm::AtomicOrdering::Monotonic: 5726 break; 5727 case llvm::AtomicOrdering::NotAtomic: 5728 case llvm::AtomicOrdering::Unordered: 5729 llvm_unreachable("Unexpected ordering."); 5730 } 5731 } 5732 5733 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 5734 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 5735 const Expr *X, const Expr *V, const Expr *E, 5736 const Expr *UE, bool IsXLHSInRHSPart, 5737 SourceLocation Loc) { 5738 switch (Kind) { 5739 case OMPC_read: 5740 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 5741 break; 5742 case OMPC_write: 5743 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 5744 break; 5745 case OMPC_unknown: 5746 case OMPC_update: 5747 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 5748 break; 5749 case OMPC_capture: 5750 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 5751 IsXLHSInRHSPart, Loc); 5752 break; 5753 case OMPC_if: 5754 case OMPC_final: 5755 case OMPC_num_threads: 5756 case OMPC_private: 5757 case OMPC_firstprivate: 5758 case OMPC_lastprivate: 5759 case OMPC_reduction: 5760 case OMPC_task_reduction: 5761 case OMPC_in_reduction: 5762 case OMPC_safelen: 5763 case OMPC_simdlen: 5764 case OMPC_sizes: 5765 case OMPC_allocator: 5766 case OMPC_allocate: 5767 case OMPC_collapse: 5768 case OMPC_default: 5769 case OMPC_seq_cst: 5770 case OMPC_acq_rel: 5771 case OMPC_acquire: 5772 case OMPC_release: 5773 case OMPC_relaxed: 5774 case OMPC_shared: 5775 case OMPC_linear: 5776 case OMPC_aligned: 5777 case OMPC_copyin: 5778 case OMPC_copyprivate: 5779 case OMPC_flush: 5780 case OMPC_depobj: 5781 case OMPC_proc_bind: 5782 case OMPC_schedule: 5783 case OMPC_ordered: 5784 case OMPC_nowait: 5785 case OMPC_untied: 5786 case OMPC_threadprivate: 5787 case OMPC_depend: 5788 case OMPC_mergeable: 5789 case OMPC_device: 5790 case OMPC_threads: 5791 case OMPC_simd: 5792 case OMPC_map: 5793 case OMPC_num_teams: 5794 case OMPC_thread_limit: 5795 case OMPC_priority: 5796 case OMPC_grainsize: 5797 case OMPC_nogroup: 5798 case OMPC_num_tasks: 5799 case OMPC_hint: 5800 case OMPC_dist_schedule: 5801 case OMPC_defaultmap: 5802 case OMPC_uniform: 5803 case OMPC_to: 5804 case OMPC_from: 5805 case OMPC_use_device_ptr: 5806 case OMPC_use_device_addr: 5807 case OMPC_is_device_ptr: 5808 case OMPC_unified_address: 5809 case OMPC_unified_shared_memory: 5810 case OMPC_reverse_offload: 5811 case OMPC_dynamic_allocators: 5812 case OMPC_atomic_default_mem_order: 5813 case OMPC_device_type: 5814 case OMPC_match: 5815 case OMPC_nontemporal: 5816 case OMPC_order: 5817 case OMPC_destroy: 5818 case OMPC_detach: 5819 case OMPC_inclusive: 5820 case OMPC_exclusive: 5821 case OMPC_uses_allocators: 5822 case OMPC_affinity: 5823 case OMPC_init: 5824 case OMPC_inbranch: 5825 case OMPC_notinbranch: 5826 case OMPC_link: 5827 case OMPC_use: 5828 case OMPC_novariants: 5829 case OMPC_nocontext: 5830 case OMPC_filter: 5831 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 5832 } 5833 } 5834 5835 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 5836 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic; 5837 bool MemOrderingSpecified = false; 5838 if (S.getSingleClause<OMPSeqCstClause>()) { 5839 AO = llvm::AtomicOrdering::SequentiallyConsistent; 5840 MemOrderingSpecified = true; 5841 } else if (S.getSingleClause<OMPAcqRelClause>()) { 5842 AO = llvm::AtomicOrdering::AcquireRelease; 5843 MemOrderingSpecified = true; 5844 } else if (S.getSingleClause<OMPAcquireClause>()) { 5845 AO = llvm::AtomicOrdering::Acquire; 5846 MemOrderingSpecified = true; 5847 } else if (S.getSingleClause<OMPReleaseClause>()) { 5848 AO = llvm::AtomicOrdering::Release; 5849 MemOrderingSpecified = true; 5850 } else if (S.getSingleClause<OMPRelaxedClause>()) { 5851 AO = llvm::AtomicOrdering::Monotonic; 5852 MemOrderingSpecified = true; 5853 } 5854 OpenMPClauseKind Kind = OMPC_unknown; 5855 for (const OMPClause *C : S.clauses()) { 5856 // Find first clause (skip seq_cst|acq_rel|aqcuire|release|relaxed clause, 5857 // if it is first). 5858 if (C->getClauseKind() != OMPC_seq_cst && 5859 C->getClauseKind() != OMPC_acq_rel && 5860 C->getClauseKind() != OMPC_acquire && 5861 C->getClauseKind() != OMPC_release && 5862 C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) { 5863 Kind = C->getClauseKind(); 5864 break; 5865 } 5866 } 5867 if (!MemOrderingSpecified) { 5868 llvm::AtomicOrdering DefaultOrder = 5869 CGM.getOpenMPRuntime().getDefaultMemoryOrdering(); 5870 if (DefaultOrder == llvm::AtomicOrdering::Monotonic || 5871 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent || 5872 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease && 5873 Kind == OMPC_capture)) { 5874 AO = DefaultOrder; 5875 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) { 5876 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) { 5877 AO = llvm::AtomicOrdering::Release; 5878 } else if (Kind == OMPC_read) { 5879 assert(Kind == OMPC_read && "Unexpected atomic kind."); 5880 AO = llvm::AtomicOrdering::Acquire; 5881 } 5882 } 5883 } 5884 5885 LexicalScope Scope(*this, S.getSourceRange()); 5886 EmitStopPoint(S.getAssociatedStmt()); 5887 emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(), 5888 S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(), 5889 S.getBeginLoc()); 5890 } 5891 5892 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 5893 const OMPExecutableDirective &S, 5894 const RegionCodeGenTy &CodeGen) { 5895 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 5896 CodeGenModule &CGM = CGF.CGM; 5897 5898 // On device emit this construct as inlined code. 5899 if (CGM.getLangOpts().OpenMPIsDevice) { 5900 OMPLexicalScope Scope(CGF, S, OMPD_target); 5901 CGM.getOpenMPRuntime().emitInlinedDirective( 5902 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5903 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 5904 }); 5905 return; 5906 } 5907 5908 auto LPCRegion = 5909 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S); 5910 llvm::Function *Fn = nullptr; 5911 llvm::Constant *FnID = nullptr; 5912 5913 const Expr *IfCond = nullptr; 5914 // Check for the at most one if clause associated with the target region. 5915 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5916 if (C->getNameModifier() == OMPD_unknown || 5917 C->getNameModifier() == OMPD_target) { 5918 IfCond = C->getCondition(); 5919 break; 5920 } 5921 } 5922 5923 // Check if we have any device clause associated with the directive. 5924 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device( 5925 nullptr, OMPC_DEVICE_unknown); 5926 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 5927 Device.setPointerAndInt(C->getDevice(), C->getModifier()); 5928 5929 // Check if we have an if clause whose conditional always evaluates to false 5930 // or if we do not have any targets specified. If so the target region is not 5931 // an offload entry point. 5932 bool IsOffloadEntry = true; 5933 if (IfCond) { 5934 bool Val; 5935 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 5936 IsOffloadEntry = false; 5937 } 5938 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5939 IsOffloadEntry = false; 5940 5941 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 5942 StringRef ParentName; 5943 // In case we have Ctors/Dtors we use the complete type variant to produce 5944 // the mangling of the device outlined kernel. 5945 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 5946 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 5947 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 5948 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 5949 else 5950 ParentName = 5951 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 5952 5953 // Emit target region as a standalone region. 5954 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 5955 IsOffloadEntry, CodeGen); 5956 OMPLexicalScope Scope(CGF, S, OMPD_task); 5957 auto &&SizeEmitter = 5958 [IsOffloadEntry](CodeGenFunction &CGF, 5959 const OMPLoopDirective &D) -> llvm::Value * { 5960 if (IsOffloadEntry) { 5961 OMPLoopScope(CGF, D); 5962 // Emit calculation of the iterations count. 5963 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 5964 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 5965 /*isSigned=*/false); 5966 return NumIterations; 5967 } 5968 return nullptr; 5969 }; 5970 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 5971 SizeEmitter); 5972 } 5973 5974 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 5975 PrePostActionTy &Action) { 5976 Action.Enter(CGF); 5977 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5978 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5979 CGF.EmitOMPPrivateClause(S, PrivateScope); 5980 (void)PrivateScope.Privatize(); 5981 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5982 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5983 5984 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 5985 CGF.EnsureInsertPoint(); 5986 } 5987 5988 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 5989 StringRef ParentName, 5990 const OMPTargetDirective &S) { 5991 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5992 emitTargetRegion(CGF, S, Action); 5993 }; 5994 llvm::Function *Fn; 5995 llvm::Constant *Addr; 5996 // Emit target region as a standalone region. 5997 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5998 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5999 assert(Fn && Addr && "Target device function emission failed."); 6000 } 6001 6002 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 6003 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6004 emitTargetRegion(CGF, S, Action); 6005 }; 6006 emitCommonOMPTargetDirective(*this, S, CodeGen); 6007 } 6008 6009 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 6010 const OMPExecutableDirective &S, 6011 OpenMPDirectiveKind InnermostKind, 6012 const RegionCodeGenTy &CodeGen) { 6013 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 6014 llvm::Function *OutlinedFn = 6015 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 6016 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 6017 6018 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 6019 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 6020 if (NT || TL) { 6021 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 6022 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 6023 6024 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 6025 S.getBeginLoc()); 6026 } 6027 6028 OMPTeamsScope Scope(CGF, S); 6029 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 6030 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 6031 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 6032 CapturedVars); 6033 } 6034 6035 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 6036 // Emit teams region as a standalone region. 6037 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6038 Action.Enter(CGF); 6039 OMPPrivateScope PrivateScope(CGF); 6040 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6041 CGF.EmitOMPPrivateClause(S, PrivateScope); 6042 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6043 (void)PrivateScope.Privatize(); 6044 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 6045 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6046 }; 6047 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6048 emitPostUpdateForReductionClause(*this, S, 6049 [](CodeGenFunction &) { return nullptr; }); 6050 } 6051 6052 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 6053 const OMPTargetTeamsDirective &S) { 6054 auto *CS = S.getCapturedStmt(OMPD_teams); 6055 Action.Enter(CGF); 6056 // Emit teams region as a standalone region. 6057 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6058 Action.Enter(CGF); 6059 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6060 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6061 CGF.EmitOMPPrivateClause(S, PrivateScope); 6062 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6063 (void)PrivateScope.Privatize(); 6064 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6065 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6066 CGF.EmitStmt(CS->getCapturedStmt()); 6067 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6068 }; 6069 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 6070 emitPostUpdateForReductionClause(CGF, S, 6071 [](CodeGenFunction &) { return nullptr; }); 6072 } 6073 6074 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 6075 CodeGenModule &CGM, StringRef ParentName, 6076 const OMPTargetTeamsDirective &S) { 6077 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6078 emitTargetTeamsRegion(CGF, Action, S); 6079 }; 6080 llvm::Function *Fn; 6081 llvm::Constant *Addr; 6082 // Emit target region as a standalone region. 6083 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6084 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6085 assert(Fn && Addr && "Target device function emission failed."); 6086 } 6087 6088 void CodeGenFunction::EmitOMPTargetTeamsDirective( 6089 const OMPTargetTeamsDirective &S) { 6090 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6091 emitTargetTeamsRegion(CGF, Action, S); 6092 }; 6093 emitCommonOMPTargetDirective(*this, S, CodeGen); 6094 } 6095 6096 static void 6097 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 6098 const OMPTargetTeamsDistributeDirective &S) { 6099 Action.Enter(CGF); 6100 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6101 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6102 }; 6103 6104 // Emit teams region as a standalone region. 6105 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6106 PrePostActionTy &Action) { 6107 Action.Enter(CGF); 6108 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6109 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6110 (void)PrivateScope.Privatize(); 6111 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6112 CodeGenDistribute); 6113 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6114 }; 6115 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 6116 emitPostUpdateForReductionClause(CGF, S, 6117 [](CodeGenFunction &) { return nullptr; }); 6118 } 6119 6120 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 6121 CodeGenModule &CGM, StringRef ParentName, 6122 const OMPTargetTeamsDistributeDirective &S) { 6123 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6124 emitTargetTeamsDistributeRegion(CGF, Action, S); 6125 }; 6126 llvm::Function *Fn; 6127 llvm::Constant *Addr; 6128 // Emit target region as a standalone region. 6129 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6130 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6131 assert(Fn && Addr && "Target device function emission failed."); 6132 } 6133 6134 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 6135 const OMPTargetTeamsDistributeDirective &S) { 6136 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6137 emitTargetTeamsDistributeRegion(CGF, Action, S); 6138 }; 6139 emitCommonOMPTargetDirective(*this, S, CodeGen); 6140 } 6141 6142 static void emitTargetTeamsDistributeSimdRegion( 6143 CodeGenFunction &CGF, PrePostActionTy &Action, 6144 const OMPTargetTeamsDistributeSimdDirective &S) { 6145 Action.Enter(CGF); 6146 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6147 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6148 }; 6149 6150 // Emit teams region as a standalone region. 6151 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6152 PrePostActionTy &Action) { 6153 Action.Enter(CGF); 6154 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6155 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6156 (void)PrivateScope.Privatize(); 6157 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6158 CodeGenDistribute); 6159 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6160 }; 6161 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 6162 emitPostUpdateForReductionClause(CGF, S, 6163 [](CodeGenFunction &) { return nullptr; }); 6164 } 6165 6166 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 6167 CodeGenModule &CGM, StringRef ParentName, 6168 const OMPTargetTeamsDistributeSimdDirective &S) { 6169 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6170 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6171 }; 6172 llvm::Function *Fn; 6173 llvm::Constant *Addr; 6174 // Emit target region as a standalone region. 6175 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6176 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6177 assert(Fn && Addr && "Target device function emission failed."); 6178 } 6179 6180 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 6181 const OMPTargetTeamsDistributeSimdDirective &S) { 6182 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6183 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6184 }; 6185 emitCommonOMPTargetDirective(*this, S, CodeGen); 6186 } 6187 6188 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 6189 const OMPTeamsDistributeDirective &S) { 6190 6191 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6192 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6193 }; 6194 6195 // Emit teams region as a standalone region. 6196 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6197 PrePostActionTy &Action) { 6198 Action.Enter(CGF); 6199 OMPPrivateScope PrivateScope(CGF); 6200 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6201 (void)PrivateScope.Privatize(); 6202 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6203 CodeGenDistribute); 6204 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6205 }; 6206 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6207 emitPostUpdateForReductionClause(*this, S, 6208 [](CodeGenFunction &) { return nullptr; }); 6209 } 6210 6211 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 6212 const OMPTeamsDistributeSimdDirective &S) { 6213 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6214 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6215 }; 6216 6217 // Emit teams region as a standalone region. 6218 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6219 PrePostActionTy &Action) { 6220 Action.Enter(CGF); 6221 OMPPrivateScope PrivateScope(CGF); 6222 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6223 (void)PrivateScope.Privatize(); 6224 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 6225 CodeGenDistribute); 6226 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6227 }; 6228 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 6229 emitPostUpdateForReductionClause(*this, S, 6230 [](CodeGenFunction &) { return nullptr; }); 6231 } 6232 6233 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 6234 const OMPTeamsDistributeParallelForDirective &S) { 6235 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6236 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6237 S.getDistInc()); 6238 }; 6239 6240 // Emit teams region as a standalone region. 6241 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6242 PrePostActionTy &Action) { 6243 Action.Enter(CGF); 6244 OMPPrivateScope PrivateScope(CGF); 6245 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6246 (void)PrivateScope.Privatize(); 6247 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6248 CodeGenDistribute); 6249 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6250 }; 6251 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 6252 emitPostUpdateForReductionClause(*this, S, 6253 [](CodeGenFunction &) { return nullptr; }); 6254 } 6255 6256 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 6257 const OMPTeamsDistributeParallelForSimdDirective &S) { 6258 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6259 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6260 S.getDistInc()); 6261 }; 6262 6263 // Emit teams region as a standalone region. 6264 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6265 PrePostActionTy &Action) { 6266 Action.Enter(CGF); 6267 OMPPrivateScope PrivateScope(CGF); 6268 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6269 (void)PrivateScope.Privatize(); 6270 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6271 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6272 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6273 }; 6274 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 6275 CodeGen); 6276 emitPostUpdateForReductionClause(*this, S, 6277 [](CodeGenFunction &) { return nullptr; }); 6278 } 6279 6280 static void emitTargetTeamsDistributeParallelForRegion( 6281 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 6282 PrePostActionTy &Action) { 6283 Action.Enter(CGF); 6284 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6285 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6286 S.getDistInc()); 6287 }; 6288 6289 // Emit teams region as a standalone region. 6290 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6291 PrePostActionTy &Action) { 6292 Action.Enter(CGF); 6293 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6294 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6295 (void)PrivateScope.Privatize(); 6296 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6297 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6298 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6299 }; 6300 6301 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 6302 CodeGenTeams); 6303 emitPostUpdateForReductionClause(CGF, S, 6304 [](CodeGenFunction &) { return nullptr; }); 6305 } 6306 6307 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 6308 CodeGenModule &CGM, StringRef ParentName, 6309 const OMPTargetTeamsDistributeParallelForDirective &S) { 6310 // Emit SPMD target teams distribute parallel for region as a standalone 6311 // region. 6312 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6313 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6314 }; 6315 llvm::Function *Fn; 6316 llvm::Constant *Addr; 6317 // Emit target region as a standalone region. 6318 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6319 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6320 assert(Fn && Addr && "Target device function emission failed."); 6321 } 6322 6323 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 6324 const OMPTargetTeamsDistributeParallelForDirective &S) { 6325 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6326 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6327 }; 6328 emitCommonOMPTargetDirective(*this, S, CodeGen); 6329 } 6330 6331 static void emitTargetTeamsDistributeParallelForSimdRegion( 6332 CodeGenFunction &CGF, 6333 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 6334 PrePostActionTy &Action) { 6335 Action.Enter(CGF); 6336 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6337 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6338 S.getDistInc()); 6339 }; 6340 6341 // Emit teams region as a standalone region. 6342 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6343 PrePostActionTy &Action) { 6344 Action.Enter(CGF); 6345 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6346 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6347 (void)PrivateScope.Privatize(); 6348 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6349 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6350 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6351 }; 6352 6353 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 6354 CodeGenTeams); 6355 emitPostUpdateForReductionClause(CGF, S, 6356 [](CodeGenFunction &) { return nullptr; }); 6357 } 6358 6359 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 6360 CodeGenModule &CGM, StringRef ParentName, 6361 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6362 // Emit SPMD target teams distribute parallel for simd region as a standalone 6363 // region. 6364 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6365 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6366 }; 6367 llvm::Function *Fn; 6368 llvm::Constant *Addr; 6369 // Emit target region as a standalone region. 6370 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6371 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6372 assert(Fn && Addr && "Target device function emission failed."); 6373 } 6374 6375 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 6376 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6377 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6378 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6379 }; 6380 emitCommonOMPTargetDirective(*this, S, CodeGen); 6381 } 6382 6383 void CodeGenFunction::EmitOMPCancellationPointDirective( 6384 const OMPCancellationPointDirective &S) { 6385 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 6386 S.getCancelRegion()); 6387 } 6388 6389 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 6390 const Expr *IfCond = nullptr; 6391 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6392 if (C->getNameModifier() == OMPD_unknown || 6393 C->getNameModifier() == OMPD_cancel) { 6394 IfCond = C->getCondition(); 6395 break; 6396 } 6397 } 6398 if (CGM.getLangOpts().OpenMPIRBuilder) { 6399 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 6400 // TODO: This check is necessary as we only generate `omp parallel` through 6401 // the OpenMPIRBuilder for now. 6402 if (S.getCancelRegion() == OMPD_parallel || 6403 S.getCancelRegion() == OMPD_sections || 6404 S.getCancelRegion() == OMPD_section) { 6405 llvm::Value *IfCondition = nullptr; 6406 if (IfCond) 6407 IfCondition = EmitScalarExpr(IfCond, 6408 /*IgnoreResultAssign=*/true); 6409 return Builder.restoreIP( 6410 OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion())); 6411 } 6412 } 6413 6414 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond, 6415 S.getCancelRegion()); 6416 } 6417 6418 CodeGenFunction::JumpDest 6419 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 6420 if (Kind == OMPD_parallel || Kind == OMPD_task || 6421 Kind == OMPD_target_parallel || Kind == OMPD_taskloop || 6422 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop) 6423 return ReturnBlock; 6424 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections || 6425 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for || 6426 Kind == OMPD_distribute_parallel_for || 6427 Kind == OMPD_target_parallel_for || 6428 Kind == OMPD_teams_distribute_parallel_for || 6429 Kind == OMPD_target_teams_distribute_parallel_for); 6430 return OMPCancelStack.getExitBlock(); 6431 } 6432 6433 void CodeGenFunction::EmitOMPUseDevicePtrClause( 6434 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, 6435 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 6436 auto OrigVarIt = C.varlist_begin(); 6437 auto InitIt = C.inits().begin(); 6438 for (const Expr *PvtVarIt : C.private_copies()) { 6439 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl()); 6440 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl()); 6441 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl()); 6442 6443 // In order to identify the right initializer we need to match the 6444 // declaration used by the mapping logic. In some cases we may get 6445 // OMPCapturedExprDecl that refers to the original declaration. 6446 const ValueDecl *MatchingVD = OrigVD; 6447 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 6448 // OMPCapturedExprDecl are used to privative fields of the current 6449 // structure. 6450 const auto *ME = cast<MemberExpr>(OED->getInit()); 6451 assert(isa<CXXThisExpr>(ME->getBase()) && 6452 "Base should be the current struct!"); 6453 MatchingVD = ME->getMemberDecl(); 6454 } 6455 6456 // If we don't have information about the current list item, move on to 6457 // the next one. 6458 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 6459 if (InitAddrIt == CaptureDeviceAddrMap.end()) 6460 continue; 6461 6462 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD, 6463 InitAddrIt, InitVD, 6464 PvtVD]() { 6465 // Initialize the temporary initialization variable with the address we 6466 // get from the runtime library. We have to cast the source address 6467 // because it is always a void *. References are materialized in the 6468 // privatization scope, so the initialization here disregards the fact 6469 // the original variable is a reference. 6470 QualType AddrQTy = 6471 getContext().getPointerType(OrigVD->getType().getNonReferenceType()); 6472 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy); 6473 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy); 6474 setAddrOfLocalVar(InitVD, InitAddr); 6475 6476 // Emit private declaration, it will be initialized by the value we 6477 // declaration we just added to the local declarations map. 6478 EmitDecl(*PvtVD); 6479 6480 // The initialization variables reached its purpose in the emission 6481 // of the previous declaration, so we don't need it anymore. 6482 LocalDeclMap.erase(InitVD); 6483 6484 // Return the address of the private variable. 6485 return GetAddrOfLocalVar(PvtVD); 6486 }); 6487 assert(IsRegistered && "firstprivate var already registered as private"); 6488 // Silence the warning about unused variable. 6489 (void)IsRegistered; 6490 6491 ++OrigVarIt; 6492 ++InitIt; 6493 } 6494 } 6495 6496 static const VarDecl *getBaseDecl(const Expr *Ref) { 6497 const Expr *Base = Ref->IgnoreParenImpCasts(); 6498 while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base)) 6499 Base = OASE->getBase()->IgnoreParenImpCasts(); 6500 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base)) 6501 Base = ASE->getBase()->IgnoreParenImpCasts(); 6502 return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl()); 6503 } 6504 6505 void CodeGenFunction::EmitOMPUseDeviceAddrClause( 6506 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, 6507 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 6508 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed; 6509 for (const Expr *Ref : C.varlists()) { 6510 const VarDecl *OrigVD = getBaseDecl(Ref); 6511 if (!Processed.insert(OrigVD).second) 6512 continue; 6513 // In order to identify the right initializer we need to match the 6514 // declaration used by the mapping logic. In some cases we may get 6515 // OMPCapturedExprDecl that refers to the original declaration. 6516 const ValueDecl *MatchingVD = OrigVD; 6517 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 6518 // OMPCapturedExprDecl are used to privative fields of the current 6519 // structure. 6520 const auto *ME = cast<MemberExpr>(OED->getInit()); 6521 assert(isa<CXXThisExpr>(ME->getBase()) && 6522 "Base should be the current struct!"); 6523 MatchingVD = ME->getMemberDecl(); 6524 } 6525 6526 // If we don't have information about the current list item, move on to 6527 // the next one. 6528 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 6529 if (InitAddrIt == CaptureDeviceAddrMap.end()) 6530 continue; 6531 6532 Address PrivAddr = InitAddrIt->getSecond(); 6533 // For declrefs and variable length array need to load the pointer for 6534 // correct mapping, since the pointer to the data was passed to the runtime. 6535 if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) || 6536 MatchingVD->getType()->isArrayType()) 6537 PrivAddr = 6538 EmitLoadOfPointer(PrivAddr, getContext() 6539 .getPointerType(OrigVD->getType()) 6540 ->castAs<PointerType>()); 6541 llvm::Type *RealTy = 6542 ConvertTypeForMem(OrigVD->getType().getNonReferenceType()) 6543 ->getPointerTo(); 6544 PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy); 6545 6546 (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; }); 6547 } 6548 } 6549 6550 // Generate the instructions for '#pragma omp target data' directive. 6551 void CodeGenFunction::EmitOMPTargetDataDirective( 6552 const OMPTargetDataDirective &S) { 6553 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true, 6554 /*SeparateBeginEndCalls=*/true); 6555 6556 // Create a pre/post action to signal the privatization of the device pointer. 6557 // This action can be replaced by the OpenMP runtime code generation to 6558 // deactivate privatization. 6559 bool PrivatizeDevicePointers = false; 6560 class DevicePointerPrivActionTy : public PrePostActionTy { 6561 bool &PrivatizeDevicePointers; 6562 6563 public: 6564 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers) 6565 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {} 6566 void Enter(CodeGenFunction &CGF) override { 6567 PrivatizeDevicePointers = true; 6568 } 6569 }; 6570 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers); 6571 6572 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers]( 6573 CodeGenFunction &CGF, PrePostActionTy &Action) { 6574 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6575 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 6576 }; 6577 6578 // Codegen that selects whether to generate the privatization code or not. 6579 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers, 6580 &InnermostCodeGen](CodeGenFunction &CGF, 6581 PrePostActionTy &Action) { 6582 RegionCodeGenTy RCG(InnermostCodeGen); 6583 PrivatizeDevicePointers = false; 6584 6585 // Call the pre-action to change the status of PrivatizeDevicePointers if 6586 // needed. 6587 Action.Enter(CGF); 6588 6589 if (PrivatizeDevicePointers) { 6590 OMPPrivateScope PrivateScope(CGF); 6591 // Emit all instances of the use_device_ptr clause. 6592 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>()) 6593 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope, 6594 Info.CaptureDeviceAddrMap); 6595 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>()) 6596 CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope, 6597 Info.CaptureDeviceAddrMap); 6598 (void)PrivateScope.Privatize(); 6599 RCG(CGF); 6600 } else { 6601 OMPLexicalScope Scope(CGF, S, OMPD_unknown); 6602 RCG(CGF); 6603 } 6604 }; 6605 6606 // Forward the provided action to the privatization codegen. 6607 RegionCodeGenTy PrivRCG(PrivCodeGen); 6608 PrivRCG.setAction(Action); 6609 6610 // Notwithstanding the body of the region is emitted as inlined directive, 6611 // we don't use an inline scope as changes in the references inside the 6612 // region are expected to be visible outside, so we do not privative them. 6613 OMPLexicalScope Scope(CGF, S); 6614 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data, 6615 PrivRCG); 6616 }; 6617 6618 RegionCodeGenTy RCG(CodeGen); 6619 6620 // If we don't have target devices, don't bother emitting the data mapping 6621 // code. 6622 if (CGM.getLangOpts().OMPTargetTriples.empty()) { 6623 RCG(*this); 6624 return; 6625 } 6626 6627 // Check if we have any if clause associated with the directive. 6628 const Expr *IfCond = nullptr; 6629 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6630 IfCond = C->getCondition(); 6631 6632 // Check if we have any device clause associated with the directive. 6633 const Expr *Device = nullptr; 6634 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6635 Device = C->getDevice(); 6636 6637 // Set the action to signal privatization of device pointers. 6638 RCG.setAction(PrivAction); 6639 6640 // Emit region code. 6641 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 6642 Info); 6643 } 6644 6645 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 6646 const OMPTargetEnterDataDirective &S) { 6647 // If we don't have target devices, don't bother emitting the data mapping 6648 // code. 6649 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6650 return; 6651 6652 // Check if we have any if clause associated with the directive. 6653 const Expr *IfCond = nullptr; 6654 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6655 IfCond = C->getCondition(); 6656 6657 // Check if we have any device clause associated with the directive. 6658 const Expr *Device = nullptr; 6659 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6660 Device = C->getDevice(); 6661 6662 OMPLexicalScope Scope(*this, S, OMPD_task); 6663 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6664 } 6665 6666 void CodeGenFunction::EmitOMPTargetExitDataDirective( 6667 const OMPTargetExitDataDirective &S) { 6668 // If we don't have target devices, don't bother emitting the data mapping 6669 // code. 6670 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6671 return; 6672 6673 // Check if we have any if clause associated with the directive. 6674 const Expr *IfCond = nullptr; 6675 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6676 IfCond = C->getCondition(); 6677 6678 // Check if we have any device clause associated with the directive. 6679 const Expr *Device = nullptr; 6680 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6681 Device = C->getDevice(); 6682 6683 OMPLexicalScope Scope(*this, S, OMPD_task); 6684 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6685 } 6686 6687 static void emitTargetParallelRegion(CodeGenFunction &CGF, 6688 const OMPTargetParallelDirective &S, 6689 PrePostActionTy &Action) { 6690 // Get the captured statement associated with the 'parallel' region. 6691 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6692 Action.Enter(CGF); 6693 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6694 Action.Enter(CGF); 6695 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6696 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6697 CGF.EmitOMPPrivateClause(S, PrivateScope); 6698 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6699 (void)PrivateScope.Privatize(); 6700 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6701 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6702 // TODO: Add support for clauses. 6703 CGF.EmitStmt(CS->getCapturedStmt()); 6704 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6705 }; 6706 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6707 emitEmptyBoundParameters); 6708 emitPostUpdateForReductionClause(CGF, S, 6709 [](CodeGenFunction &) { return nullptr; }); 6710 } 6711 6712 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6713 CodeGenModule &CGM, StringRef ParentName, 6714 const OMPTargetParallelDirective &S) { 6715 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6716 emitTargetParallelRegion(CGF, S, Action); 6717 }; 6718 llvm::Function *Fn; 6719 llvm::Constant *Addr; 6720 // Emit target region as a standalone region. 6721 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6722 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6723 assert(Fn && Addr && "Target device function emission failed."); 6724 } 6725 6726 void CodeGenFunction::EmitOMPTargetParallelDirective( 6727 const OMPTargetParallelDirective &S) { 6728 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6729 emitTargetParallelRegion(CGF, S, Action); 6730 }; 6731 emitCommonOMPTargetDirective(*this, S, CodeGen); 6732 } 6733 6734 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 6735 const OMPTargetParallelForDirective &S, 6736 PrePostActionTy &Action) { 6737 Action.Enter(CGF); 6738 // Emit directive as a combined directive that consists of two implicit 6739 // directives: 'parallel' with 'for' directive. 6740 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6741 Action.Enter(CGF); 6742 CodeGenFunction::OMPCancelStackRAII CancelRegion( 6743 CGF, OMPD_target_parallel_for, S.hasCancel()); 6744 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6745 emitDispatchForLoopBounds); 6746 }; 6747 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 6748 emitEmptyBoundParameters); 6749 } 6750 6751 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 6752 CodeGenModule &CGM, StringRef ParentName, 6753 const OMPTargetParallelForDirective &S) { 6754 // Emit SPMD target parallel for region as a standalone region. 6755 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6756 emitTargetParallelForRegion(CGF, S, Action); 6757 }; 6758 llvm::Function *Fn; 6759 llvm::Constant *Addr; 6760 // Emit target region as a standalone region. 6761 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6762 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6763 assert(Fn && Addr && "Target device function emission failed."); 6764 } 6765 6766 void CodeGenFunction::EmitOMPTargetParallelForDirective( 6767 const OMPTargetParallelForDirective &S) { 6768 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6769 emitTargetParallelForRegion(CGF, S, Action); 6770 }; 6771 emitCommonOMPTargetDirective(*this, S, CodeGen); 6772 } 6773 6774 static void 6775 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 6776 const OMPTargetParallelForSimdDirective &S, 6777 PrePostActionTy &Action) { 6778 Action.Enter(CGF); 6779 // Emit directive as a combined directive that consists of two implicit 6780 // directives: 'parallel' with 'for' directive. 6781 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6782 Action.Enter(CGF); 6783 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6784 emitDispatchForLoopBounds); 6785 }; 6786 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen, 6787 emitEmptyBoundParameters); 6788 } 6789 6790 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 6791 CodeGenModule &CGM, StringRef ParentName, 6792 const OMPTargetParallelForSimdDirective &S) { 6793 // Emit SPMD target parallel for region as a standalone region. 6794 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6795 emitTargetParallelForSimdRegion(CGF, S, Action); 6796 }; 6797 llvm::Function *Fn; 6798 llvm::Constant *Addr; 6799 // Emit target region as a standalone region. 6800 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6801 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6802 assert(Fn && Addr && "Target device function emission failed."); 6803 } 6804 6805 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective( 6806 const OMPTargetParallelForSimdDirective &S) { 6807 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6808 emitTargetParallelForSimdRegion(CGF, S, Action); 6809 }; 6810 emitCommonOMPTargetDirective(*this, S, CodeGen); 6811 } 6812 6813 /// Emit a helper variable and return corresponding lvalue. 6814 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, 6815 const ImplicitParamDecl *PVD, 6816 CodeGenFunction::OMPPrivateScope &Privates) { 6817 const auto *VDecl = cast<VarDecl>(Helper->getDecl()); 6818 Privates.addPrivate(VDecl, 6819 [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); }); 6820 } 6821 6822 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) { 6823 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind())); 6824 // Emit outlined function for task construct. 6825 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop); 6826 Address CapturedStruct = Address::invalid(); 6827 { 6828 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6829 CapturedStruct = GenerateCapturedStmtArgument(*CS); 6830 } 6831 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 6832 const Expr *IfCond = nullptr; 6833 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6834 if (C->getNameModifier() == OMPD_unknown || 6835 C->getNameModifier() == OMPD_taskloop) { 6836 IfCond = C->getCondition(); 6837 break; 6838 } 6839 } 6840 6841 OMPTaskDataTy Data; 6842 // Check if taskloop must be emitted without taskgroup. 6843 Data.Nogroup = S.getSingleClause<OMPNogroupClause>(); 6844 // TODO: Check if we should emit tied or untied task. 6845 Data.Tied = true; 6846 // Set scheduling for taskloop 6847 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) { 6848 // grainsize clause 6849 Data.Schedule.setInt(/*IntVal=*/false); 6850 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 6851 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) { 6852 // num_tasks clause 6853 Data.Schedule.setInt(/*IntVal=*/true); 6854 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 6855 } 6856 6857 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 6858 // if (PreCond) { 6859 // for (IV in 0..LastIteration) BODY; 6860 // <Final counter/linear vars updates>; 6861 // } 6862 // 6863 6864 // Emit: if (PreCond) - begin. 6865 // If the condition constant folds and can be elided, avoid emitting the 6866 // whole loop. 6867 bool CondConstant; 6868 llvm::BasicBlock *ContBlock = nullptr; 6869 OMPLoopScope PreInitScope(CGF, S); 6870 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 6871 if (!CondConstant) 6872 return; 6873 } else { 6874 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 6875 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 6876 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 6877 CGF.getProfileCount(&S)); 6878 CGF.EmitBlock(ThenBlock); 6879 CGF.incrementProfileCounter(&S); 6880 } 6881 6882 (void)CGF.EmitOMPLinearClauseInit(S); 6883 6884 OMPPrivateScope LoopScope(CGF); 6885 // Emit helper vars inits. 6886 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6887 auto *I = CS->getCapturedDecl()->param_begin(); 6888 auto *LBP = std::next(I, LowerBound); 6889 auto *UBP = std::next(I, UpperBound); 6890 auto *STP = std::next(I, Stride); 6891 auto *LIP = std::next(I, LastIter); 6892 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6893 LoopScope); 6894 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6895 LoopScope); 6896 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6897 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6898 LoopScope); 6899 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6900 CGF.EmitOMPLinearClause(S, LoopScope); 6901 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6902 (void)LoopScope.Privatize(); 6903 // Emit the loop iteration variable. 6904 const Expr *IVExpr = S.getIterationVariable(); 6905 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6906 CGF.EmitVarDecl(*IVDecl); 6907 CGF.EmitIgnoredExpr(S.getInit()); 6908 6909 // Emit the iterations count variable. 6910 // If it is not a variable, Sema decided to calculate iterations count on 6911 // each iteration (e.g., it is foldable into a constant). 6912 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6913 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6914 // Emit calculation of the iterations count. 6915 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6916 } 6917 6918 { 6919 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6920 emitCommonSimdLoop( 6921 CGF, S, 6922 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6923 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6924 CGF.EmitOMPSimdInit(S); 6925 }, 6926 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6927 CGF.EmitOMPInnerLoop( 6928 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6929 [&S](CodeGenFunction &CGF) { 6930 emitOMPLoopBodyWithStopPoint(CGF, S, 6931 CodeGenFunction::JumpDest()); 6932 }, 6933 [](CodeGenFunction &) {}); 6934 }); 6935 } 6936 // Emit: if (PreCond) - end. 6937 if (ContBlock) { 6938 CGF.EmitBranch(ContBlock); 6939 CGF.EmitBlock(ContBlock, true); 6940 } 6941 // Emit final copy of the lastprivate variables if IsLastIter != 0. 6942 if (HasLastprivateClause) { 6943 CGF.EmitOMPLastprivateClauseFinal( 6944 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6945 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6946 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6947 (*LIP)->getType(), S.getBeginLoc()))); 6948 } 6949 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6950 return CGF.Builder.CreateIsNotNull( 6951 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6952 (*LIP)->getType(), S.getBeginLoc())); 6953 }); 6954 }; 6955 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6956 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6957 const OMPTaskDataTy &Data) { 6958 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6959 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6960 OMPLoopScope PreInitScope(CGF, S); 6961 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6962 OutlinedFn, SharedsTy, 6963 CapturedStruct, IfCond, Data); 6964 }; 6965 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6966 CodeGen); 6967 }; 6968 if (Data.Nogroup) { 6969 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6970 } else { 6971 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6972 *this, 6973 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6974 PrePostActionTy &Action) { 6975 Action.Enter(CGF); 6976 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6977 Data); 6978 }, 6979 S.getBeginLoc()); 6980 } 6981 } 6982 6983 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6984 auto LPCRegion = 6985 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6986 EmitOMPTaskLoopBasedDirective(S); 6987 } 6988 6989 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6990 const OMPTaskLoopSimdDirective &S) { 6991 auto LPCRegion = 6992 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6993 OMPLexicalScope Scope(*this, S); 6994 EmitOMPTaskLoopBasedDirective(S); 6995 } 6996 6997 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6998 const OMPMasterTaskLoopDirective &S) { 6999 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7000 Action.Enter(CGF); 7001 EmitOMPTaskLoopBasedDirective(S); 7002 }; 7003 auto LPCRegion = 7004 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7005 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 7006 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 7007 } 7008 7009 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 7010 const OMPMasterTaskLoopSimdDirective &S) { 7011 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7012 Action.Enter(CGF); 7013 EmitOMPTaskLoopBasedDirective(S); 7014 }; 7015 auto LPCRegion = 7016 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7017 OMPLexicalScope Scope(*this, S); 7018 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 7019 } 7020 7021 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 7022 const OMPParallelMasterTaskLoopDirective &S) { 7023 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7024 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 7025 PrePostActionTy &Action) { 7026 Action.Enter(CGF); 7027 CGF.EmitOMPTaskLoopBasedDirective(S); 7028 }; 7029 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 7030 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 7031 S.getBeginLoc()); 7032 }; 7033 auto LPCRegion = 7034 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7035 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 7036 emitEmptyBoundParameters); 7037 } 7038 7039 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 7040 const OMPParallelMasterTaskLoopSimdDirective &S) { 7041 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7042 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 7043 PrePostActionTy &Action) { 7044 Action.Enter(CGF); 7045 CGF.EmitOMPTaskLoopBasedDirective(S); 7046 }; 7047 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 7048 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 7049 S.getBeginLoc()); 7050 }; 7051 auto LPCRegion = 7052 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7053 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 7054 emitEmptyBoundParameters); 7055 } 7056 7057 // Generate the instructions for '#pragma omp target update' directive. 7058 void CodeGenFunction::EmitOMPTargetUpdateDirective( 7059 const OMPTargetUpdateDirective &S) { 7060 // If we don't have target devices, don't bother emitting the data mapping 7061 // code. 7062 if (CGM.getLangOpts().OMPTargetTriples.empty()) 7063 return; 7064 7065 // Check if we have any if clause associated with the directive. 7066 const Expr *IfCond = nullptr; 7067 if (const auto *C = S.getSingleClause<OMPIfClause>()) 7068 IfCond = C->getCondition(); 7069 7070 // Check if we have any device clause associated with the directive. 7071 const Expr *Device = nullptr; 7072 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 7073 Device = C->getDevice(); 7074 7075 OMPLexicalScope Scope(*this, S, OMPD_task); 7076 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 7077 } 7078 7079 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 7080 const OMPExecutableDirective &D) { 7081 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 7082 EmitOMPScanDirective(*SD); 7083 return; 7084 } 7085 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 7086 return; 7087 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 7088 OMPPrivateScope GlobalsScope(CGF); 7089 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 7090 // Capture global firstprivates to avoid crash. 7091 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) { 7092 for (const Expr *Ref : C->varlists()) { 7093 const auto *DRE = cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 7094 if (!DRE) 7095 continue; 7096 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()); 7097 if (!VD || VD->hasLocalStorage()) 7098 continue; 7099 if (!CGF.LocalDeclMap.count(VD)) { 7100 LValue GlobLVal = CGF.EmitLValue(Ref); 7101 GlobalsScope.addPrivate( 7102 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 7103 } 7104 } 7105 } 7106 } 7107 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 7108 (void)GlobalsScope.Privatize(); 7109 ParentLoopDirectiveForScanRegion ScanRegion(CGF, D); 7110 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action); 7111 } else { 7112 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) { 7113 for (const Expr *E : LD->counters()) { 7114 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 7115 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) { 7116 LValue GlobLVal = CGF.EmitLValue(E); 7117 GlobalsScope.addPrivate( 7118 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 7119 } 7120 if (isa<OMPCapturedExprDecl>(VD)) { 7121 // Emit only those that were not explicitly referenced in clauses. 7122 if (!CGF.LocalDeclMap.count(VD)) 7123 CGF.EmitVarDecl(*VD); 7124 } 7125 } 7126 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) { 7127 if (!C->getNumForLoops()) 7128 continue; 7129 for (unsigned I = LD->getLoopsNumber(), 7130 E = C->getLoopNumIterations().size(); 7131 I < E; ++I) { 7132 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>( 7133 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) { 7134 // Emit only those that were not explicitly referenced in clauses. 7135 if (!CGF.LocalDeclMap.count(VD)) 7136 CGF.EmitVarDecl(*VD); 7137 } 7138 } 7139 } 7140 } 7141 (void)GlobalsScope.Privatize(); 7142 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt()); 7143 } 7144 }; 7145 if (D.getDirectiveKind() == OMPD_atomic || 7146 D.getDirectiveKind() == OMPD_critical || 7147 D.getDirectiveKind() == OMPD_section || 7148 D.getDirectiveKind() == OMPD_master || 7149 D.getDirectiveKind() == OMPD_masked) { 7150 EmitStmt(D.getAssociatedStmt()); 7151 } else { 7152 auto LPCRegion = 7153 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D); 7154 OMPSimdLexicalScope Scope(*this, D); 7155 CGM.getOpenMPRuntime().emitInlinedDirective( 7156 *this, 7157 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd 7158 : D.getDirectiveKind(), 7159 CodeGen); 7160 } 7161 // Check for outer lastprivate conditional update. 7162 checkForLastprivateConditionalUpdate(*this, D); 7163 } 7164