//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct that handles correct
/// codegen for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(
                                OrigVD->getType().getNonReferenceType()))),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
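      // (The loop preconditions and bounds computed by Sema refer to these
      // implicit variables, so they must be in scope before the loop bounds
      // are evaluated.)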
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

/// Lexical scope for OpenMP directives emitted as inlined code (e.g. in
/// simd-only mode) that handles correct codegen for captured expressions.
class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted as they are
    // not used in simd-only mode.
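    // Collect those temp copy arrays up front so the capture walk below can
    // skip them.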
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

/// Reinterpret the storage of a uintptr-typed argument as an address of the
/// original type \p DstType so the captured value can be loaded with its real
/// type.
static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

/// Compute a canonical parameter type for an outlined function argument;
/// variably modified array types are decayed to pointers to their canonical
/// element type.
static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
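/// These options tell emitOutlinedFunctionPrologue how to build and register
/// the argument list of the outlined function.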
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted
    // to uintptr. This is necessary given that the runtime library is only
    // able to deal with pointers. The VLA type sizes are passed to the
    // outlined function in the same way.
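    // E.g. an 'int' captured by copy travels as a uintptr-sized bit pattern
    // and is cast back to 'int' inside the outlined function.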
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
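  // When debug info is required, the body is emitted into a "_debug__"
  // variant that keeps the original parameter types, and a wrapper with the
  // runtime-expected uintptr signature forwards its arguments to it.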
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
      LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
      VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace())));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
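  // The element count is computed from the destination array; the source is
  // then bitcast to the same element type so both pointers step in lockstep.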
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit
  // an outlined function, like omp for, omp simd, omp distribute, etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target
      // regions, captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
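          // (A non-reference constant needs no runtime copy at all, while a
          // reference constant still provides an lvalue to copy from.)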
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check if the current thread is the master thread. If it is,
          // there is no need to copy data.
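          // The master and private addresses compare equal only on the master
          // thread, so the copy is branched around there.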
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VD, C, OrigVD]() {
                if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit the following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
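    // Inscan reductions are finalized separately as part of the scan
    // directive codegen.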
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit the conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to an
/// outlined parallel function.
/// This is necessary for combined constructs such as
/// 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  if (CGF.getLangOpts().OpenMP < 50)
    return;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  // Privates should not be analyzed since they are not captured at all.
  // Task reductions may be skipped - tasks are ignored.
  // Firstprivates do not return their value but may be passed by reference -
  // no need to check for updated lastprivate conditional.
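  // For illustration (a sketch, not code from this file), a conditional
  // lastprivate such as
  //   #pragma omp parallel for lastprivate(conditional: x)
  //   for (int i = 0; i < n; ++i)
  //     if (p[i]) x = i;
  // must yield the value of x from the last iteration that actually assigned
  // it, which is why stores to the listed variables are tracked above.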
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    }
  }
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
      CGF, S, PrivateDecls);
}

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute'
  // chunk's lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static bool isAllocatableDecl(const VarDecl *VD) {
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  // Decls on the default (or null) allocator with no allocator expression use
  // the regular allocation path and are not considered allocatable here.
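  // (Illustrative: a declaration like
  //   int a;
  //   #pragma omp allocate(a) allocator(omp_pteam_mem_alloc)
  // carries an OMPAllocateDeclAttr with a non-default allocator, so `a` is
  // considered allocatable here.)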
  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
            AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
           !AA->getAllocator());
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
    CodeGenFunction &CGF, const VarDecl *VD) {
  CodeGenModule &CGM = CGF.CGM;
  auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  if (!VD)
    return Address::invalid();
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!isAllocatableDecl(CVD))
    return Address::invalid();
  llvm::Value *Size;
  CharUnits Align = CGM.getContext().getDeclAlign(CVD);
  if (CVD->getType()->isVariablyModifiedType()) {
    Size = CGF.getTypeSize(CVD->getType());
    // Align the size: ((size + align - 1) / align) * align
    Size = CGF.Builder.CreateNUWAdd(
        Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
    Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
    Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
  } else {
    CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
    Size = CGM.getSize(Sz.alignTo(Align));
  }

  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  assert(AA->getAllocator() &&
         "Expected allocator expression for non-default allocator.");
  llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
  // According to the standard, the original allocator type is an enum
  // (integer). Convert to pointer type, if required.
  if (Allocator->getType()->isIntegerTy())
    Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
  else if (Allocator->getType()->isPointerTy())
    Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
                                                                CGM.VoidPtrTy);

  llvm::Value *Addr = OMPBuilder.createOMPAlloc(
      CGF.Builder, Size, Allocator,
      getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
  llvm::CallInst *FreeCI =
      OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);

  CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr,
      CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
      getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
  return Address(Addr, Align);
}

Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
    CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
    SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return VDAddr;

  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  llvm::Type *VarTy = VDAddr.getElementType();
  llvm::Value *Data =
      CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
  llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
  std::string Suffix = getNameWithSeparators({"cache", ""});
  llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);

  llvm::CallInst *ThreadPrivateCacheCall =
      OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);

  return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
}

std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
    ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
  SmallString<128> Buffer;
  llvm::raw_svector_ostream OS(Buffer);
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
    OS << Sep << Part;
    Sep = Separator;
  }
  return OS.str().str();
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    // Check if we have any if clause associated with the directive.
    llvm::Value *IfCond = nullptr;
    if (const auto *C = S.getSingleClause<OMPIfClause>())
      IfCond = EmitScalarExpr(C->getCondition(),
                              /*IgnoreResultAssign=*/true);

    llvm::Value *NumThreads = nullptr;
    if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
      NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                  /*IgnoreResultAssign=*/true);

    ProcBindKind ProcBind = OMP_PROC_BIND_default;
    if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
      ProcBind = ProcBindClause->getProcBindKind();

    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    // The cleanup callback that finalizes all variables at the given location,
    // thus calls destructors etc.
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    // Privatization callback that performs appropriate action for
    // shared/private/firstprivate/lastprivate/copyin/... variables.
    //
    // TODO: This defaults to shared right now.
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                     llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
      // The next line is appropriate only for variables (Val) with the
      // data-sharing attribute "shared".
      ReplVal = &Val;

      return CodeGenIP;
    };

    const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
    const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();

    auto BodyGenCB = [ParallelRegionBodyStmt,
                      this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                            llvm::BasicBlock &ContinuationBB) {
      OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
                                                      ContinuationBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
                                             CodeGenIP, ContinuationBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
        AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
    Builder.restoreIP(
        OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
                                  IfCond, NumThreads, ProcBind, S.hasCancel()));
    return;
  }

  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // on propagation of the master thread's values of threadprivate
      // variables to local instances of those variables in all other implicit
      // threads.
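      // (E.g. with `int tp; #pragma omp threadprivate(tp)` and
      // `#pragma omp parallel copyin(tp)`, each thread's tp must be
      // initialized from the master thread's tp before any thread reads it.)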
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

namespace {
/// RAII to handle scopes for loop transformation directives.
class OMPTransformDirectiveScopeRAII {
  OMPLoopScope *Scope = nullptr;
  CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
  CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;

public:
  OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
    if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
      Scope = new OMPLoopScope(CGF, *Dir);
      CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
      CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
    }
  }
  ~OMPTransformDirectiveScopeRAII() {
    if (!Scope)
      return;
    delete CapInfoRAII;
    delete CGSI;
    delete Scope;
  }
};
} // namespace

static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
                     int MaxLevel, int Level = 0) {
  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
  const Stmt *SimplifiedS = S->IgnoreContainers();
  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
    PrettyStackTraceLoc CrashInfo(
        CGF.getContext().getSourceManager(), CS->getLBracLoc(),
        "LLVM IR generation of compound statement ('{}')");

    // Keep track of the current cleanup stack depth, including debug scopes.
    CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
    for (const Stmt *CurStmt : CS->body())
      emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
    return;
  }
  if (SimplifiedS == NextLoop) {
    OMPTransformDirectiveScopeRAII PossiblyTransformDirectiveScope(CGF,
                                                                   SimplifiedS);
    if (auto *Dir = dyn_cast<OMPTileDirective>(SimplifiedS))
      SimplifiedS = Dir->getTransformedStmt();
    if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
      SimplifiedS = CanonLoop->getLoopStmt();
    if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
      S = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(SimplifiedS) &&
             "Expected canonical for loop or range-based for loop.");
      const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
      CGF.EmitStmt(CXXFor->getLoopVarStmt());
      S = CXXFor->getBody();
    }
    if (Level + 1 < MaxLevel) {
      NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
          S, /*TryImperfectlyNestedLoops=*/true);
      emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
      return;
    }
  }
  CGF.EmitStmt(S);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (const Expr *UE : D.updates())
    EmitIgnoredExpr(UE);
  // Update the linear variables.
  // In distribute directives only loop counters may be marked as linear, no
  // need to generate the code for them.
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
      for (const Expr *UE : C->updates())
        EmitIgnoredExpr(UE);
    }
  }

  // On a continue in the body, jump to the end.
  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  for (const Expr *E : D.finals_conditions()) {
    if (!E)
      continue;
    // Check that loop counter in non-rectangular nest fits into the iteration
    // space.
    llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
    EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
                         getProfileCount(D.getBody()));
    EmitBlock(NextBB);
  }

  OMPPrivateScope InscanScope(*this);
  EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
  bool IsInscanRegion = InscanScope.Privatize();
  if (IsInscanRegion) {
    // Need to remember the block before and after scan directive
    // to dispatch them correctly depending on the clause used in
    // this directive, inclusive or exclusive. For inclusive scan the natural
    // order of the blocks is used, for exclusive clause the blocks must be
    // executed in reverse order.
    OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
    OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
    // No need to allocate inscan exit block; in simd mode it is selected in
    // the codegen for the scan directive.
    if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
      OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
    OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
    EmitBranch(OMPScanDispatch);
    EmitBlock(OMPBeforeScanBlock);
  }

  // Emit loop variables for C++ range loops.
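  // (E.g. for `#pragma omp for collapse(2)` over two nested loops, emitBody
  // below descends two levels, also emitting any statements that sit between
  // the loops of an imperfect nest.)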
  const Stmt *Body =
      D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
  // Emit loop body.
  emitBody(*this, Body,
           OMPLoopBasedDirective::tryToFindNextInnerLoop(
               Body, /*TryImperfectlyNestedLoops=*/true),
           D.getLoopsNumber());

  // Jump to the dispatcher at the end of the loop body.
  if (IsInscanRegion)
    EmitBranch(OMPScanExitBlock);

  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;

/// Emit a captured statement and return the function as well as its captured
/// closure context.
static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
                                             const CapturedStmt *S) {
  LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
  CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
  std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
      std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);

  return {F, CapStruct.getPointer(ParentCGF)};
}

/// Emit a call to a previously captured closure.
static llvm::CallInst *
emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
                     llvm::ArrayRef<llvm::Value *> Args) {
  // Append the closure context to the argument.
  SmallVector<llvm::Value *> EffectiveArgs;
  EffectiveArgs.reserve(Args.size() + 1);
  llvm::append_range(EffectiveArgs, Args);
  EffectiveArgs.push_back(Cap.second);

  return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
}

llvm::CanonicalLoopInfo *
CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
  assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");

  EmitStmt(S);
  assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");

  // The last added loop is the outermost one.
  return OMPLoopNestStack.back();
}

void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
  const Stmt *SyntacticalLoop = S->getLoopStmt();
  if (!getLangOpts().OpenMPIRBuilder) {
    // Ignore the OMPCanonicalLoop wrapper if the OpenMPIRBuilder is not
    // enabled; just emit the loop statement itself.
    EmitStmt(SyntacticalLoop);
    return;
  }

  LexicalScope ForScope(*this, S->getSourceRange());

  // Emit init statements. The Distance/LoopVar funcs may reference variable
  // declarations they contain.
  const Stmt *BodyStmt;
  if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
    if (const Stmt *InitStmt = For->getInit())
      EmitStmt(InitStmt);
    BodyStmt = For->getBody();
  } else if (const auto *RangeFor =
                 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
    if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
      EmitStmt(RangeStmt);
    if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
      EmitStmt(BeginStmt);
    if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
      EmitStmt(EndStmt);
    if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
      EmitStmt(LoopVarStmt);
    BodyStmt = RangeFor->getBody();
  } else
    llvm_unreachable("Expected for-stmt or range-based for-stmt");

  // Emit closure for later use. By-value captures will be captured here.
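  // (The distance function stores the trip count into its out-parameter;
  // e.g. for `for (int i = a; i < b; i += s)` it computes, roughly,
  // (b - a + s - 1) / s -- a sketch of the semantics, not the exact AST.)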
  const CapturedStmt *DistanceFunc = S->getDistanceFunc();
  EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
  const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
  EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);

  // Call the distance function to get the number of iterations of the loop to
  // come.
  QualType LogicalTy = DistanceFunc->getCapturedDecl()
                           ->getParam(0)
                           ->getType()
                           .getNonReferenceType();
  Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
  emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
  llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");

  // Emit the loop structure.
  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
                           llvm::Value *IndVar) {
    Builder.restoreIP(CodeGenIP);

    // Emit the loop body: Convert the logical iteration number to the loop
    // variable and emit the body.
    const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
    LValue LCVal = EmitLValue(LoopVarRef);
    Address LoopVarAddress = LCVal.getAddress(*this);
    emitCapturedStmtCall(*this, LoopVarClosure,
                         {LoopVarAddress.getPointer(), IndVar});

    RunCleanupsScope BodyScope(*this);
    EmitStmt(BodyStmt);
  };
  llvm::CanonicalLoopInfo *CL =
      OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);

  // Finish up the loop.
  Builder.restoreIP(CL->getAfterIP());
  ForScope.ForceCleanup();

  // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
  OMPLoopNestStack.push_back(CL);
}

void CodeGenFunction::EmitOMPInnerLoop(
    const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();

  // If attributes are attached, push to the basic block with them.
  const auto &OMPED = cast<OMPExecutableDirective>(S);
  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
  const Stmt *SS = ICS->getCapturedStmt();
  const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
  OMPLoopNestStack.clear();
  if (AS)
    LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
                   AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
                   SourceLocToDebugLoc(R.getEnd()));
  else
    LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                   SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // Emit inits for the linear variables.
  bool HasLinears = false;
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Init : C->inits()) {
      HasLinears = true;
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (const auto *Ref =
              dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        VD->getInit()->getType(), VK_LValue,
                        VD->getInit()->getExprLoc());
        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
                                                VD->getType()),
                       /*capturedByInit=*/false);
        EmitAutoVarCleanups(Emission);
      } else {
        EmitVarDecl(*VD);
      }
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
  return HasLinears;
}

void CodeGenFunction::EmitOMPLinearClauseFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (const Expr *F : C->finals()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
          DoneBB = createBasicBlock(".omp.linear.pu.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    llvm::APInt ClauseAlignment(64, 0);
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = AlignmentCI->getValue();
    }
    for (const Expr *E : Clause->varlists()) {
      llvm::APInt Alignment(ClauseAlignment);
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined default
        // alignments for SIMD instructions on the target platforms are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || Alignment.isPowerOf2()) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.emitAlignmentAssumption(
            PtrValue, E, /*No second loc needed*/ SourceLocation(),
            llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
      }
    }
  }
}

void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (const Expr *E : S.counters()) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    // Emit var without initialization.
    AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
    EmitAutoVarCleanups(VarEmission);
    LocalDeclMap.erase(PrivateVD);
    (void)LoopScope.addPrivate(VD, [&VarEmission]() {
      return VarEmission.getAllocatedAddress();
    });
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                        E->getType(), VK_LValue, E->getExprLoc());
        return EmitLValue(&DRE).getAddress(*this);
      });
    } else {
      (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
        return VarEmission.getAllocatedAddress();
      });
    }
    ++I;
  }
  // Privatize extra loop counters used in loops for ordered(n) clauses.
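  // (E.g. `#pragma omp for ordered(2)` on a two-level nest: the counter of
  // the second loop participates in doacross dependences even though only
  // one loop is collapsed, so it is privatized here as well.)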
  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
    if (!C->getNumForLoops())
      continue;
    for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
         I < E; ++I) {
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      // Override only those variables that can be captured to avoid re-emission
      // of the variables declared within the loops.
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
          return CreateMemTemp(DRE->getType(), VD->getName());
        });
      }
    }
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (const Expr *I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Create temp loop control variables with their init values to support
  // non-rectangular loops.
  CodeGenFunction::OMPMapVars PreCondVars;
  for (const Expr *E : S.dependent_counters()) {
    if (!E)
      continue;
    assert(!E->getType().getNonReferenceType()->isRecordType() &&
           "dependent counter must not be an iterator.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Address CounterAddr =
        CGF.CreateMemTemp(VD->getType().getNonReferenceType());
    (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
  }
  (void)PreCondVars.apply(CGF);
  for (const Expr *E : S.dependent_inits()) {
    if (!E)
      continue;
    CGF.EmitIgnoredExpr(E);
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
  PreCondVars.restore(CGF);
}

void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (const Expr *E : C->varlists()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
          // Emit private VarDecl with copy init.
          EmitVarDecl(*PrivateVD);
          return GetAddrOfLocalVar(PrivateVD);
        });
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else {
        EmitVarDecl(*PrivateVD);
      }
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(/*Enable=*/false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable();
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
  if (const auto *C = D.getSingleClause<OMPOrderClause>())
    if (C->getKind() == OMPC_ORDER_concurrent)
      LoopStack.setParallel(/*Enable=*/true);
  if ((D.getDirectiveKind() == OMPD_simd ||
       (getLangOpts().OpenMPSimd &&
        isOpenMPSimdDirective(D.getDirectiveKind()))) &&
      llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
                   [](const OMPReductionClause *C) {
                     return C->getModifier() == OMPC_REDUCTION_inscan;
                   }))
    // Disable parallel access in case of prefix sum.
    LoopStack.setParallel(/*Enable=*/false);
}

void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
          DoneBB = createBasicBlock(".omp.final.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      Address OrigAddr = Address::invalid();
      if (CED) {
        OrigAddr =
            EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
      } else {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
                        /*RefersToEnclosingVariableOrCapture=*/false,
                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
        OrigAddr = EmitLValue(&DRE).getAddress(*this);
      }
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
    ++IPC;
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
                                         const OMPLoopDirective &S,
                                         CodeGenFunction::JumpDest LoopExit) {
  CGF.EmitOMPLoopBody(S, LoopExit);
  CGF.EmitStopPoint(&S);
}

/// Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
                               const RegionCodeGenTy &SimdInitGen,
                               const RegionCodeGenTy &BodyCodeGen) {
  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    SimdInitGen(CGF);

    BodyCodeGen(CGF);
  };
  auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);

    BodyCodeGen(CGF);
  };
  const Expr *IfCond = nullptr;
  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
    for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
      if (CGF.getLangOpts().OpenMP >= 50 &&
          (C->getNameModifier() == OMPD_unknown ||
           C->getNameModifier() == OMPD_simd)) {
        IfCond = C->getCondition();
        break;
      }
    }
  }
  if (IfCond) {
    CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}

static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
                              PrePostActionTy &Action) {
  Action.Enter(CGF);
  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
         "Expected simd directive");
  OMPLoopScope PreInitScope(CGF, S);
  // if (PreCond) {
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  // }
  //
  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
      isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
      isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
  }

  // Emit: if (PreCond) - begin.
  // If the condition constant folds and can be elided, avoid emitting the
  // whole loop.
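  // (E.g. for `for (int i = 0; i < 0; ++i)` the precondition folds to a
  // constant false, so neither the iteration variable nor the loop body is
  // emitted.)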
  bool CondConstant;
  llvm::BasicBlock *ContBlock = nullptr;
  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
    if (!CondConstant)
      return;
  } else {
    llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
    ContBlock = CGF.createBasicBlock("simd.if.end");
    emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                CGF.getProfileCount(&S));
    CGF.EmitBlock(ThenBlock);
    CGF.incrementProfileCounter(&S);
  }

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  CGF.EmitVarDecl(*IVDecl);
  CGF.EmitIgnoredExpr(S.getInit());

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on
  // each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    CGF.EmitIgnoredExpr(S.getCalcLastIteration());
  }

  emitAlignedClause(CGF, S);
  (void)CGF.EmitOMPLinearClauseInit(S);
  {
    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
        CGF, S, CGF.EmitLValue(S.getIterationVariable()));
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    emitCommonSimdLoop(
        CGF, S,
        [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPSimdInit(S);
        },
        [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPInnerLoop(
              S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
              [&S](CodeGenFunction &CGF) {
                emitOMPLoopBodyWithStopPoint(CGF, S,
                                             CodeGenFunction::JumpDest());
              },
              [](CodeGenFunction &) {});
        });
    CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
    // Emit final copy of the lastprivate variables at the end of loops.
    if (HasLastprivateClause)
      CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
    emitPostUpdateForReductionClause(CGF, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
  // Emit: if (PreCond) - end.
  if (ContBlock) {
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
  OMPFirstScanLoop = true;
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
  // Emit the de-sugared statement.
  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
  EmitStmt(S.getTransformedStmt());
}

void CodeGenFunction::EmitOMPOuterLoop(
    bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
    CodeGenFunction::OMPPrivateScope &LoopScope,
    const CodeGenFunction::OMPLoopArguments &LoopArgs,
    const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
    const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();
  OMPLoopNestStack.clear();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB) or
    // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
    // 'distribute parallel for')
    EmitIgnoredExpr(LoopArgs.EUB);
    // IV = LB
    EmitIgnoredExpr(LoopArgs.Init);
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
  } else {
    BoolCondVal =
        RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
                       LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(LoopArgs.Init);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  emitCommonSimdLoop(
      *this, S,
      [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
        // Generate !llvm.loop.parallel metadata for loads and stores for loops
        // with dynamic/guided scheduling and without ordered clause.
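        // (In the emitted IR this appears roughly as a loop branch annotated
        // with `!llvm.loop` metadata containing an
        // `llvm.loop.parallel_accesses` entry -- a sketch of the shape, not
        // the exact output.)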
        if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
          CGF.LoopStack.setParallel(!IsMonotonic);
          if (const auto *C = S.getSingleClause<OMPOrderClause>())
            if (C->getKind() == OMPC_ORDER_concurrent)
              CGF.LoopStack.setParallel(/*Enable=*/true);
        } else {
          CGF.EmitOMPSimdInit(S, IsMonotonic);
        }
      },
      [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
       &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
        SourceLocation Loc = S.getBeginLoc();
        // when 'distribute' is not combined with a 'for':
        // while (idx <= UB) { BODY; ++idx; }
        // when 'distribute' is combined with a 'for'
        // (e.g. 'distribute parallel for')
        // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
        CGF.EmitOMPInnerLoop(
            S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
            [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
              CodeGenLoop(CGF, S, LoopExit);
            },
            [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
              CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
            });
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(LoopArgs.NextLB);
    EmitIgnoredExpr(LoopArgs.NextUB);
  }

  EmitBranch(CondBlock);
  OMPLoopNestStack.clear();
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
    if (!DynamicOrOrdered)
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
  };
  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(
    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    const OMPLoopArguments &LoopArgs,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);

  assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
                                            LoopArgs.Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned.
  // For a chunk_size of 1, the size of each chunk is proportional to the
  // number of unassigned iterations divided by the number of threads in the
  // team, decreasing to 1. For a chunk_size with value k (greater than 1),
  // the size of each chunk is determined in the same way, with the restriction
  // that the chunks do not contain fewer than k iterations (except for the
  // last chunk to be assigned, which may have fewer than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
    CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
                                                              LoopArgs.Chunk};
    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
                           IVSigned, Ordered, DispatchRTInputValues);
  } else {
    CGOpenMPRuntime::StaticRTInput StaticInit(
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);
    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                         ScheduleKind, StaticInit);
  }

  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
                                    const unsigned IVSize,
                                    const bool IVSigned) {
    if (Ordered) {
      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
                                                            IVSigned);
    }
  };

  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}

static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}

void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
    const CodeGenLoopTy &CodeGenLoopContent) {

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Emit outer loop.
  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
  // dynamic.

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  CGOpenMPRuntime::StaticRTInput StaticInit(
      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);

  // For combined 'distribute' and 'for', the increment expression of
  // distribute is stored in DistInc. For 'distribute' alone, it is in Inc.
  Expr *IncExpr;
  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
    IncExpr = S.getDistInc();
  else
    IncExpr = S.getInc();

  // This routine is shared by 'omp distribute parallel for' and
  // 'omp distribute': select the right EUB expression depending on the
  // directive.
  OMPLoopArguments OuterLoopArgs;
  OuterLoopArgs.LB = LoopArgs.LB;
  OuterLoopArgs.UB = LoopArgs.UB;
  OuterLoopArgs.ST = LoopArgs.ST;
  OuterLoopArgs.IL = LoopArgs.IL;
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedEnsureUpperBound()
                          : S.getEnsureUpperBound();
  OuterLoopArgs.IncExpr = IncExpr;
  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedInit()
                           : S.getInit();
  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedCond()
                           : S.getCond();
  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextLowerBound()
                             : S.getNextLowerBound();
  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextUpperBound()
                             : S.getNextUpperBound();

  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
                   emitEmptyOrdered);
}

static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));

  // When composing 'distribute' with 'for' (e.g. as in 'distribute
  // parallel for') we need to use the 'distribute'
  // chunk lower and upper bounds rather than the whole loop iteration
  // space. These are parameters to the outlined function for 'parallel'
  // and we copy the bounds of the previous schedule into the current ones.
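  // (E.g. under `#pragma omp distribute parallel for`, a team whose
  // 'distribute' chunk is iterations [128, 255] runs its inner worksharing
  // loop over exactly that range instead of 0..LastIteration.)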
  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
      PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
  PrevLBVal = CGF.EmitScalarConversion(
      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevLowerBoundVariable()->getExprLoc());
  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
      PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
  PrevUBVal = CGF.EmitScalarConversion(
      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevUpperBoundVariable()->getExprLoc());

  CGF.EmitStoreOfScalar(PrevLBVal, LB);
  CGF.EmitStoreOfScalar(PrevUBVal, UB);

  return {LB, UB};
}

/// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then we
/// need to use the LB and UB expressions generated by the worksharing code
/// generation support, whereas in non-combined situations we would just emit
/// 0 and the LastIteration expression.
/// This function is necessary because the LB and UB types differ between the
/// RT emission routines for 'for_static_init' and 'for_dispatch_init'.
static std::pair<llvm::Value *, llvm::Value *>
emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        Address LB, Address UB) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  // When implementing a dynamic schedule for a 'for' combined with a
  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop is not
  // normalized, as each team only executes its own assigned distribute
  // chunk.
  QualType IteratorTy = IVExpr->getType();
  llvm::Value *LBVal =
      CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  llvm::Value *UBVal =
      CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  return {LBVal, UBVal};
}

static void emitDistributeParallelForDistributeInnerBoundParams(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const auto &Dir = cast<OMPLoopDirective>(S);
  LValue LB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
  llvm::Value *LBCast =
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
                                CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(LBCast);
  LValue UB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));

  llvm::Value *UBCast =
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
                                CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(UBCast);
}

static void
emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
                                 const OMPLoopDirective &S,
                                 CodeGenFunction::JumpDest LoopExit) {
  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
                                         PrePostActionTy &Action) {
    Action.Enter(CGF);
    bool HasCancel = false;
    if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
      if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
        HasCancel =
            D->hasCancel();
      else if (const auto *D =
                   dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
    }
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
                                                     HasCancel);
    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
                               emitDistributeParallelForInnerBounds,
                               emitDistributeParallelForDispatchBounds);
  };

  emitCommonOMPParallelDirective(
      CGF, S,
      isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
      CGInlinedWorksharingLoop,
      emitDistributeParallelForDistributeInnerBoundParams);
}

void CodeGenFunction::EmitOMPDistributeParallelForDirective(
    const OMPDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
    const OMPDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeSimdDirective(
    const OMPDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
  // Emit SPMD target simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetSimdDirective(
    const OMPTargetSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

bool CodeGenFunction::EmitOMPWorksharingLoop(
    const OMPLoopDirective &S, Expr *EUB,
    const CodeGenLoopBoundsTy &CodeGenLoopBounds,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  // Emit the loop iteration variable.
3047 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3048 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3049 EmitVarDecl(*IVDecl); 3050 3051 // Emit the iterations count variable. 3052 // If it is not a variable, Sema decided to calculate iterations count on each 3053 // iteration (e.g., it is foldable into a constant). 3054 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3055 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3056 // Emit calculation of the iterations count. 3057 EmitIgnoredExpr(S.getCalcLastIteration()); 3058 } 3059 3060 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 3061 3062 bool HasLastprivateClause; 3063 // Check pre-condition. 3064 { 3065 OMPLoopScope PreInitScope(*this, S); 3066 // Skip the entire loop if we don't meet the precondition. 3067 // If the condition constant folds and can be elided, avoid emitting the 3068 // whole loop. 3069 bool CondConstant; 3070 llvm::BasicBlock *ContBlock = nullptr; 3071 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3072 if (!CondConstant) 3073 return false; 3074 } else { 3075 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 3076 ContBlock = createBasicBlock("omp.precond.end"); 3077 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3078 getProfileCount(&S)); 3079 EmitBlock(ThenBlock); 3080 incrementProfileCounter(&S); 3081 } 3082 3083 RunCleanupsScope DoacrossCleanupScope(*this); 3084 bool Ordered = false; 3085 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 3086 if (OrderedClause->getNumForLoops()) 3087 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 3088 else 3089 Ordered = true; 3090 } 3091 3092 llvm::DenseSet<const Expr *> EmittedFinals; 3093 emitAlignedClause(*this, S); 3094 bool HasLinears = EmitOMPLinearClauseInit(S); 3095 // Emit helper vars inits. 3096 3097 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 3098 LValue LB = Bounds.first; 3099 LValue UB = Bounds.second; 3100 LValue ST = 3101 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3102 LValue IL = 3103 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3104 3105 // Emit 'then' code. 3106 { 3107 OMPPrivateScope LoopScope(*this); 3108 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 3109 // Emit implicit barrier to synchronize threads and avoid data races on 3110 // initialization of firstprivate variables and post-update of 3111 // lastprivate variables. 3112 CGM.getOpenMPRuntime().emitBarrierCall( 3113 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3114 /*ForceSimpleCall=*/true); 3115 } 3116 EmitOMPPrivateClause(S, LoopScope); 3117 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 3118 *this, S, EmitLValue(S.getIterationVariable())); 3119 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3120 EmitOMPReductionClauseInit(S, LoopScope); 3121 EmitOMPPrivateLoopCounters(S, LoopScope); 3122 EmitOMPLinearClause(S, LoopScope); 3123 (void)LoopScope.Privatize(); 3124 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3125 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3126 3127 // Detect the loop schedule kind and chunk. 
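      // For example (a sketch, not tied to any particular test case):
      //   #pragma omp for schedule(dynamic, 4)
      // yields ScheduleKind.Schedule == OMPC_SCHEDULE_dynamic with
      // ChunkExpr == '4', while a plain '#pragma omp for' has no schedule
      // clause and falls back to getDefaultScheduleAndChunk() below.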
3128 const Expr *ChunkExpr = nullptr; 3129 OpenMPScheduleTy ScheduleKind; 3130 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 3131 ScheduleKind.Schedule = C->getScheduleKind(); 3132 ScheduleKind.M1 = C->getFirstScheduleModifier(); 3133 ScheduleKind.M2 = C->getSecondScheduleModifier(); 3134 ChunkExpr = C->getChunkSize(); 3135 } else { 3136 // Default behaviour for schedule clause. 3137 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 3138 *this, S, ScheduleKind.Schedule, ChunkExpr); 3139 } 3140 bool HasChunkSizeOne = false; 3141 llvm::Value *Chunk = nullptr; 3142 if (ChunkExpr) { 3143 Chunk = EmitScalarExpr(ChunkExpr); 3144 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 3145 S.getIterationVariable()->getType(), 3146 S.getBeginLoc()); 3147 Expr::EvalResult Result; 3148 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 3149 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 3150 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 3151 } 3152 } 3153 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3154 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3155 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 3156 // If the static schedule kind is specified or if the ordered clause is 3157 // specified, and if no monotonic modifier is specified, the effect will 3158 // be as if the monotonic modifier was specified. 3159 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 3160 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 3161 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3162 bool IsMonotonic = 3163 Ordered || 3164 ((ScheduleKind.Schedule == OMPC_SCHEDULE_static || 3165 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) && 3166 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic || 3167 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) || 3168 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 3169 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 3170 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 3171 /* Chunked */ Chunk != nullptr) || 3172 StaticChunkedOne) && 3173 !Ordered) { 3174 JumpDest LoopExit = 3175 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3176 emitCommonSimdLoop( 3177 *this, S, 3178 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 3179 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3180 CGF.EmitOMPSimdInit(S, IsMonotonic); 3181 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 3182 if (C->getKind() == OMPC_ORDER_concurrent) 3183 CGF.LoopStack.setParallel(/*Enable=*/true); 3184 } 3185 }, 3186 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 3187 &S, ScheduleKind, LoopExit, 3188 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 3189 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 3190 // When no chunk_size is specified, the iteration space is divided 3191 // into chunks that are approximately equal in size, and at most 3192 // one chunk is distributed to each thread. Note that the size of 3193 // the chunks is unspecified in this case. 3194 CGOpenMPRuntime::StaticRTInput StaticInit( 3195 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 3196 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 3197 StaticChunkedOne ? 
Chunk : nullptr); 3198 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3199 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 3200 StaticInit); 3201 // UB = min(UB, GlobalUB); 3202 if (!StaticChunkedOne) 3203 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 3204 // IV = LB; 3205 CGF.EmitIgnoredExpr(S.getInit()); 3206 // For unchunked static schedule generate: 3207 // 3208 // while (idx <= UB) { 3209 // BODY; 3210 // ++idx; 3211 // } 3212 // 3213 // For static schedule with chunk one: 3214 // 3215 // while (IV <= PrevUB) { 3216 // BODY; 3217 // IV += ST; 3218 // } 3219 CGF.EmitOMPInnerLoop( 3220 S, LoopScope.requiresCleanups(), 3221 StaticChunkedOne ? S.getCombinedParForInDistCond() 3222 : S.getCond(), 3223 StaticChunkedOne ? S.getDistInc() : S.getInc(), 3224 [&S, LoopExit](CodeGenFunction &CGF) { 3225 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 3226 }, 3227 [](CodeGenFunction &) {}); 3228 }); 3229 EmitBlock(LoopExit.getBlock()); 3230 // Tell the runtime we are done. 3231 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3232 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3233 S.getDirectiveKind()); 3234 }; 3235 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 3236 } else { 3237 // Emit the outer loop, which requests its work chunk [LB..UB] from 3238 // runtime and runs the inner loop to process it. 3239 const OMPLoopArguments LoopArguments( 3240 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3241 IL.getAddress(*this), Chunk, EUB); 3242 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 3243 LoopArguments, CGDispatchBounds); 3244 } 3245 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3246 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 3247 return CGF.Builder.CreateIsNotNull( 3248 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3249 }); 3250 } 3251 EmitOMPReductionClauseFinal( 3252 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 3253 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 3254 : /*Parallel only*/ OMPD_parallel); 3255 // Emit post-update of the reduction variables if IsLastIter != 0. 3256 emitPostUpdateForReductionClause( 3257 *this, S, [IL, &S](CodeGenFunction &CGF) { 3258 return CGF.Builder.CreateIsNotNull( 3259 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3260 }); 3261 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3262 if (HasLastprivateClause) 3263 EmitOMPLastprivateClauseFinal( 3264 S, isOpenMPSimdDirective(S.getDirectiveKind()), 3265 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 3266 } 3267 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 3268 return CGF.Builder.CreateIsNotNull( 3269 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3270 }); 3271 DoacrossCleanupScope.ForceCleanup(); 3272 // We're now done with the loop, so jump to the continuation block. 3273 if (ContBlock) { 3274 EmitBranch(ContBlock); 3275 EmitBlock(ContBlock, /*IsFinished=*/true); 3276 } 3277 } 3278 return HasLastprivateClause; 3279 } 3280 3281 /// The following two functions generate expressions for the loop lower 3282 /// and upper bounds in case of static and dynamic (dispatch) schedule 3283 /// of the associated 'for' or 'distribute' loop. 
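/// For instance (a sketch): for a normalized loop with iterations
/// 0..LastIteration, the static path below materializes the precomputed
/// LB/UB helper variables as lvalues, whereas the dispatch path passes the
/// plain constants 0 and LastIteration to the runtime, which then hands out
/// chunks on demand.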
3284 static std::pair<LValue, LValue> 3285 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3286 const auto &LS = cast<OMPLoopDirective>(S); 3287 LValue LB = 3288 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 3289 LValue UB = 3290 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 3291 return {LB, UB}; 3292 } 3293 3294 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not 3295 /// consider the lower and upper bound expressions generated by the 3296 /// worksharing loop support, but we use 0 and the iteration space size as 3297 /// constants 3298 static std::pair<llvm::Value *, llvm::Value *> 3299 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, 3300 Address LB, Address UB) { 3301 const auto &LS = cast<OMPLoopDirective>(S); 3302 const Expr *IVExpr = LS.getIterationVariable(); 3303 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType()); 3304 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0); 3305 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration()); 3306 return {LBVal, UBVal}; 3307 } 3308 3309 /// Emits internal temp array declarations for the directive with inscan 3310 /// reductions. 3311 /// The code is the following: 3312 /// \code 3313 /// size num_iters = <num_iters>; 3314 /// <type> buffer[num_iters]; 3315 /// \endcode 3316 static void emitScanBasedDirectiveDecls( 3317 CodeGenFunction &CGF, const OMPLoopDirective &S, 3318 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) { 3319 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast( 3320 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false); 3321 SmallVector<const Expr *, 4> Shareds; 3322 SmallVector<const Expr *, 4> Privates; 3323 SmallVector<const Expr *, 4> ReductionOps; 3324 SmallVector<const Expr *, 4> CopyArrayTemps; 3325 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3326 assert(C->getModifier() == OMPC_REDUCTION_inscan && 3327 "Only inscan reductions are expected."); 3328 Shareds.append(C->varlist_begin(), C->varlist_end()); 3329 Privates.append(C->privates().begin(), C->privates().end()); 3330 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 3331 CopyArrayTemps.append(C->copy_array_temps().begin(), 3332 C->copy_array_temps().end()); 3333 } 3334 { 3335 // Emit buffers for each reduction variables. 3336 // ReductionCodeGen is required to emit correctly the code for array 3337 // reductions. 3338 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps); 3339 unsigned Count = 0; 3340 auto *ITA = CopyArrayTemps.begin(); 3341 for (const Expr *IRef : Privates) { 3342 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl()); 3343 // Emit variably modified arrays, used for arrays/array sections 3344 // reductions. 3345 if (PrivateVD->getType()->isVariablyModifiedType()) { 3346 RedCG.emitSharedOrigLValue(CGF, Count); 3347 RedCG.emitAggregateType(CGF, Count); 3348 } 3349 CodeGenFunction::OpaqueValueMapping DimMapping( 3350 CGF, 3351 cast<OpaqueValueExpr>( 3352 cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe()) 3353 ->getSizeExpr()), 3354 RValue::get(OMPScanNumIterations)); 3355 // Emit temp buffer. 3356 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl())); 3357 ++ITA; 3358 ++Count; 3359 } 3360 } 3361 } 3362 3363 /// Emits the code for the directive with inscan reductions. 3364 /// The code is the following: 3365 /// \code 3366 /// #pragma omp ... 
/// for (i: 0..<num_iters>) {
///   <input phase>;
///   buffer[i] = red;
/// }
/// #pragma omp master // in parallel region
/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
/// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
///   buffer[cnt] op= buffer[cnt-pow(2,k)];
/// #pragma omp barrier // in parallel region
/// #pragma omp ...
/// for (0..<num_iters>) {
///   red = InclusiveScan ? buffer[i] : buffer[i-1];
///   <scan phase>;
/// }
/// \endcode
static void emitScanBasedDirective(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
    llvm::function_ref<void(CodeGenFunction &)> FirstGen,
    llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
  {
    // Emit loop with input phase:
    // #pragma omp ...
3408 // for (i: 0..<num_iters>) { 3409 // <input phase>; 3410 // buffer[i] = red; 3411 // } 3412 CGF.OMPFirstScanLoop = true; 3413 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3414 FirstGen(CGF); 3415 } 3416 // #pragma omp barrier // in parallel region 3417 auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems, 3418 &ReductionOps, 3419 &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) { 3420 Action.Enter(CGF); 3421 // Emit prefix reduction: 3422 // #pragma omp master // in parallel region 3423 // for (int k = 0; k <= ceil(log2(n)); ++k) 3424 llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock(); 3425 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body"); 3426 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit"); 3427 llvm::Function *F = 3428 CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy); 3429 llvm::Value *Arg = 3430 CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy); 3431 llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg); 3432 F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy); 3433 LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal); 3434 LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy); 3435 llvm::Value *NMin1 = CGF.Builder.CreateNUWSub( 3436 OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3437 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc()); 3438 CGF.EmitBlock(LoopBB); 3439 auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2); 3440 // size pow2k = 1; 3441 auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3442 Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB); 3443 Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB); 3444 // for (size i = n - 1; i >= 2 ^ k; --i) 3445 // tmp[i] op= tmp[i-pow2k]; 3446 llvm::BasicBlock *InnerLoopBB = 3447 CGF.createBasicBlock("omp.inner.log.scan.body"); 3448 llvm::BasicBlock *InnerExitBB = 3449 CGF.createBasicBlock("omp.inner.log.scan.exit"); 3450 llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K); 3451 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3452 CGF.EmitBlock(InnerLoopBB); 3453 auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3454 IVal->addIncoming(NMin1, LoopBB); 3455 { 3456 CodeGenFunction::OMPPrivateScope PrivScope(CGF); 3457 auto *ILHS = LHSs.begin(); 3458 auto *IRHS = RHSs.begin(); 3459 for (const Expr *CopyArrayElem : CopyArrayElems) { 3460 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 3461 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 3462 Address LHSAddr = Address::invalid(); 3463 { 3464 CodeGenFunction::OpaqueValueMapping IdxMapping( 3465 CGF, 3466 cast<OpaqueValueExpr>( 3467 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3468 RValue::get(IVal)); 3469 LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3470 } 3471 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3472 Address RHSAddr = Address::invalid(); 3473 { 3474 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3475 CodeGenFunction::OpaqueValueMapping IdxMapping( 3476 CGF, 3477 cast<OpaqueValueExpr>( 3478 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3479 RValue::get(OffsetIVal)); 3480 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3481 } 3482 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3483 ++ILHS; 3484 ++IRHS; 3485 } 3486 PrivScope.Privatize(); 3487 CGF.CGM.getOpenMPRuntime().emitReduction( 3488 CGF, S.getEndLoc(), Privates, LHSs, RHSs, 
ReductionOps, 3489 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3490 } 3491 llvm::Value *NextIVal = 3492 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3493 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3494 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3495 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3496 CGF.EmitBlock(InnerExitBB); 3497 llvm::Value *Next = 3498 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3499 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3500 // pow2k <<= 1; 3501 llvm::Value *NextPow2K = 3502 CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3503 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3504 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3505 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3506 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3507 CGF.EmitBlock(ExitBB); 3508 }; 3509 if (isOpenMPParallelDirective(S.getDirectiveKind())) { 3510 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3511 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3512 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3513 /*ForceSimpleCall=*/true); 3514 } else { 3515 RegionCodeGenTy RCG(CodeGen); 3516 RCG(CGF); 3517 } 3518 3519 CGF.OMPFirstScanLoop = false; 3520 SecondGen(CGF); 3521 } 3522 3523 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3524 const OMPLoopDirective &S, 3525 bool HasCancel) { 3526 bool HasLastprivates; 3527 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3528 [](const OMPReductionClause *C) { 3529 return C->getModifier() == OMPC_REDUCTION_inscan; 3530 })) { 3531 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3532 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3533 OMPLoopScope LoopScope(CGF, S); 3534 return CGF.EmitScalarExpr(S.getNumIterations()); 3535 }; 3536 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3537 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3538 CGF, S.getDirectiveKind(), HasCancel); 3539 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3540 emitForLoopBounds, 3541 emitDispatchForLoopBounds); 3542 // Emit an implicit barrier at the end. 
3543 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3544 OMPD_for); 3545 }; 3546 const auto &&SecondGen = [&S, HasCancel, 3547 &HasLastprivates](CodeGenFunction &CGF) { 3548 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3549 CGF, S.getDirectiveKind(), HasCancel); 3550 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3551 emitForLoopBounds, 3552 emitDispatchForLoopBounds); 3553 }; 3554 if (!isOpenMPParallelDirective(S.getDirectiveKind())) 3555 emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen); 3556 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3557 } else { 3558 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3559 HasCancel); 3560 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3561 emitForLoopBounds, 3562 emitDispatchForLoopBounds); 3563 } 3564 return HasLastprivates; 3565 } 3566 3567 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) { 3568 if (S.hasCancel()) 3569 return false; 3570 for (OMPClause *C : S.clauses()) 3571 if (!isa<OMPNowaitClause>(C)) 3572 return false; 3573 3574 return true; 3575 } 3576 3577 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3578 bool HasLastprivates = false; 3579 bool UseOMPIRBuilder = 3580 CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S); 3581 auto &&CodeGen = [this, &S, &HasLastprivates, 3582 UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) { 3583 // Use the OpenMPIRBuilder if enabled. 3584 if (UseOMPIRBuilder) { 3585 // Emit the associated statement and get its loop representation. 3586 const Stmt *Inner = S.getRawStmt(); 3587 llvm::CanonicalLoopInfo *CLI = 3588 EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 3589 3590 bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>(); 3591 llvm::OpenMPIRBuilder &OMPBuilder = 3592 CGM.getOpenMPRuntime().getOMPBuilder(); 3593 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3594 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3595 OMPBuilder.createWorkshareLoop(Builder, CLI, AllocaIP, NeedsBarrier); 3596 return; 3597 } 3598 3599 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3600 }; 3601 { 3602 auto LPCRegion = 3603 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3604 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3605 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3606 S.hasCancel()); 3607 } 3608 3609 if (!UseOMPIRBuilder) { 3610 // Emit an implicit barrier at the end. 3611 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3612 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3613 } 3614 // Check for outer lastprivate conditional update. 3615 checkForLastprivateConditionalUpdate(*this, S); 3616 } 3617 3618 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3619 bool HasLastprivates = false; 3620 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3621 PrePostActionTy &) { 3622 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3623 }; 3624 { 3625 auto LPCRegion = 3626 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3627 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3628 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3629 } 3630 3631 // Emit an implicit barrier at the end. 
3632 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3633 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3634 // Check for outer lastprivate conditional update. 3635 checkForLastprivateConditionalUpdate(*this, S); 3636 } 3637 3638 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3639 const Twine &Name, 3640 llvm::Value *Init = nullptr) { 3641 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3642 if (Init) 3643 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3644 return LVal; 3645 } 3646 3647 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3648 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3649 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3650 bool HasLastprivates = false; 3651 auto &&CodeGen = [&S, CapturedStmt, CS, 3652 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3653 const ASTContext &C = CGF.getContext(); 3654 QualType KmpInt32Ty = 3655 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3656 // Emit helper vars inits. 3657 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3658 CGF.Builder.getInt32(0)); 3659 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3660 ? CGF.Builder.getInt32(CS->size() - 1) 3661 : CGF.Builder.getInt32(0); 3662 LValue UB = 3663 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3664 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3665 CGF.Builder.getInt32(1)); 3666 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3667 CGF.Builder.getInt32(0)); 3668 // Loop counter. 3669 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3670 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3671 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3672 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3673 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3674 // Generate condition for loop. 3675 BinaryOperator *Cond = BinaryOperator::Create( 3676 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary, 3677 S.getBeginLoc(), FPOptionsOverride()); 3678 // Increment for loop counter. 3679 UnaryOperator *Inc = UnaryOperator::Create( 3680 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 3681 S.getBeginLoc(), true, FPOptionsOverride()); 3682 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3683 // Iterate through all sections and emit a switch construct: 3684 // switch (IV) { 3685 // case 0: 3686 // <SectionStmt[0]>; 3687 // break; 3688 // ... 3689 // case <NumSection> - 1: 3690 // <SectionStmt[<NumSection> - 1]>; 3691 // break; 3692 // } 3693 // .omp.sections.exit: 3694 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3695 llvm::SwitchInst *SwitchStmt = 3696 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3697 ExitBB, CS == nullptr ? 
1 : CS->size()); 3698 if (CS) { 3699 unsigned CaseNumber = 0; 3700 for (const Stmt *SubStmt : CS->children()) { 3701 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3702 CGF.EmitBlock(CaseBB); 3703 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3704 CGF.EmitStmt(SubStmt); 3705 CGF.EmitBranch(ExitBB); 3706 ++CaseNumber; 3707 } 3708 } else { 3709 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3710 CGF.EmitBlock(CaseBB); 3711 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3712 CGF.EmitStmt(CapturedStmt); 3713 CGF.EmitBranch(ExitBB); 3714 } 3715 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3716 }; 3717 3718 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3719 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3720 // Emit implicit barrier to synchronize threads and avoid data races on 3721 // initialization of firstprivate variables and post-update of lastprivate 3722 // variables. 3723 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3724 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3725 /*ForceSimpleCall=*/true); 3726 } 3727 CGF.EmitOMPPrivateClause(S, LoopScope); 3728 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3729 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3730 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3731 (void)LoopScope.Privatize(); 3732 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3733 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3734 3735 // Emit static non-chunked loop. 3736 OpenMPScheduleTy ScheduleKind; 3737 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3738 CGOpenMPRuntime::StaticRTInput StaticInit( 3739 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3740 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3741 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3742 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3743 // UB = min(UB, GlobalUB); 3744 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3745 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3746 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3747 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3748 // IV = LB; 3749 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3750 // while (idx <= UB) { BODY; ++idx; } 3751 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3752 [](CodeGenFunction &) {}); 3753 // Tell the runtime we are done. 3754 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3755 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3756 S.getDirectiveKind()); 3757 }; 3758 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3759 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3760 // Emit post-update of the reduction variables if IsLastIter != 0. 3761 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3762 return CGF.Builder.CreateIsNotNull( 3763 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3764 }); 3765 3766 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
3767 if (HasLastprivates) 3768 CGF.EmitOMPLastprivateClauseFinal( 3769 S, /*NoFinals=*/false, 3770 CGF.Builder.CreateIsNotNull( 3771 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3772 }; 3773 3774 bool HasCancel = false; 3775 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3776 HasCancel = OSD->hasCancel(); 3777 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3778 HasCancel = OPSD->hasCancel(); 3779 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3780 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3781 HasCancel); 3782 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3783 // clause. Otherwise the barrier will be generated by the codegen for the 3784 // directive. 3785 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3786 // Emit implicit barrier to synchronize threads and avoid data races on 3787 // initialization of firstprivate variables. 3788 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3789 OMPD_unknown); 3790 } 3791 } 3792 3793 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3794 if (CGM.getLangOpts().OpenMPIRBuilder) { 3795 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3796 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3797 using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy; 3798 3799 auto FiniCB = [this](InsertPointTy IP) { 3800 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3801 }; 3802 3803 const CapturedStmt *ICS = S.getInnermostCapturedStmt(); 3804 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3805 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3806 llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector; 3807 if (CS) { 3808 for (const Stmt *SubStmt : CS->children()) { 3809 auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP, 3810 InsertPointTy CodeGenIP, 3811 llvm::BasicBlock &FiniBB) { 3812 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, 3813 FiniBB); 3814 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SubStmt, CodeGenIP, 3815 FiniBB); 3816 }; 3817 SectionCBVector.push_back(SectionCB); 3818 } 3819 } else { 3820 auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP, 3821 InsertPointTy CodeGenIP, 3822 llvm::BasicBlock &FiniBB) { 3823 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3824 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CapturedStmt, CodeGenIP, 3825 FiniBB); 3826 }; 3827 SectionCBVector.push_back(SectionCB); 3828 } 3829 3830 // Privatization callback that performs appropriate action for 3831 // shared/private/firstprivate/lastprivate/copyin/... variables. 3832 // 3833 // TODO: This defaults to shared right now. 3834 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 3835 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) { 3836 // The next line is appropriate only for variables (Val) with the 3837 // data-sharing attribute "shared". 
3838 ReplVal = &Val; 3839 3840 return CodeGenIP; 3841 }; 3842 3843 CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP); 3844 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3845 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3846 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3847 Builder.restoreIP(OMPBuilder.createSections( 3848 Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(), 3849 S.getSingleClause<OMPNowaitClause>())); 3850 return; 3851 } 3852 { 3853 auto LPCRegion = 3854 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3855 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3856 EmitSections(S); 3857 } 3858 // Emit an implicit barrier at the end. 3859 if (!S.getSingleClause<OMPNowaitClause>()) { 3860 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3861 OMPD_sections); 3862 } 3863 // Check for outer lastprivate conditional update. 3864 checkForLastprivateConditionalUpdate(*this, S); 3865 } 3866 3867 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3868 if (CGM.getLangOpts().OpenMPIRBuilder) { 3869 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3870 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3871 3872 const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt(); 3873 auto FiniCB = [this](InsertPointTy IP) { 3874 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3875 }; 3876 3877 auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP, 3878 InsertPointTy CodeGenIP, 3879 llvm::BasicBlock &FiniBB) { 3880 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3881 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SectionRegionBodyStmt, 3882 CodeGenIP, FiniBB); 3883 }; 3884 3885 LexicalScope Scope(*this, S.getSourceRange()); 3886 EmitStopPoint(&S); 3887 Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB)); 3888 3889 return; 3890 } 3891 LexicalScope Scope(*this, S.getSourceRange()); 3892 EmitStopPoint(&S); 3893 EmitStmt(S.getAssociatedStmt()); 3894 } 3895 3896 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3897 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3898 llvm::SmallVector<const Expr *, 8> DestExprs; 3899 llvm::SmallVector<const Expr *, 8> SrcExprs; 3900 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3901 // Check if there are any 'copyprivate' clauses associated with this 3902 // 'single' construct. 
3903 // Build a list of copyprivate variables along with helper expressions 3904 // (<source>, <destination>, <destination>=<source> expressions) 3905 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3906 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3907 DestExprs.append(C->destination_exprs().begin(), 3908 C->destination_exprs().end()); 3909 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3910 AssignmentOps.append(C->assignment_ops().begin(), 3911 C->assignment_ops().end()); 3912 } 3913 // Emit code for 'single' region along with 'copyprivate' clauses 3914 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3915 Action.Enter(CGF); 3916 OMPPrivateScope SingleScope(CGF); 3917 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3918 CGF.EmitOMPPrivateClause(S, SingleScope); 3919 (void)SingleScope.Privatize(); 3920 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3921 }; 3922 { 3923 auto LPCRegion = 3924 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3925 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3926 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3927 CopyprivateVars, DestExprs, 3928 SrcExprs, AssignmentOps); 3929 } 3930 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3931 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3932 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3933 CGM.getOpenMPRuntime().emitBarrierCall( 3934 *this, S.getBeginLoc(), 3935 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3936 } 3937 // Check for outer lastprivate conditional update. 3938 checkForLastprivateConditionalUpdate(*this, S); 3939 } 3940 3941 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3942 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3943 Action.Enter(CGF); 3944 CGF.EmitStmt(S.getRawStmt()); 3945 }; 3946 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3947 } 3948 3949 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3950 if (CGM.getLangOpts().OpenMPIRBuilder) { 3951 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3952 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3953 3954 const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt(); 3955 3956 auto FiniCB = [this](InsertPointTy IP) { 3957 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3958 }; 3959 3960 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3961 InsertPointTy CodeGenIP, 3962 llvm::BasicBlock &FiniBB) { 3963 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3964 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3965 CodeGenIP, FiniBB); 3966 }; 3967 3968 LexicalScope Scope(*this, S.getSourceRange()); 3969 EmitStopPoint(&S); 3970 Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB)); 3971 3972 return; 3973 } 3974 LexicalScope Scope(*this, S.getSourceRange()); 3975 EmitStopPoint(&S); 3976 emitMaster(*this, S); 3977 } 3978 3979 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3980 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3981 Action.Enter(CGF); 3982 CGF.EmitStmt(S.getRawStmt()); 3983 }; 3984 Expr *Filter = nullptr; 3985 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3986 Filter = 
FilterClause->getThreadID(); 3987 CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(), 3988 Filter); 3989 } 3990 3991 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) { 3992 if (CGM.getLangOpts().OpenMPIRBuilder) { 3993 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3994 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3995 3996 const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt(); 3997 const Expr *Filter = nullptr; 3998 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3999 Filter = FilterClause->getThreadID(); 4000 llvm::Value *FilterVal = Filter 4001 ? EmitScalarExpr(Filter, CGM.Int32Ty) 4002 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0); 4003 4004 auto FiniCB = [this](InsertPointTy IP) { 4005 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4006 }; 4007 4008 auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP, 4009 InsertPointTy CodeGenIP, 4010 llvm::BasicBlock &FiniBB) { 4011 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4012 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt, 4013 CodeGenIP, FiniBB); 4014 }; 4015 4016 LexicalScope Scope(*this, S.getSourceRange()); 4017 EmitStopPoint(&S); 4018 Builder.restoreIP( 4019 OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal)); 4020 4021 return; 4022 } 4023 LexicalScope Scope(*this, S.getSourceRange()); 4024 EmitStopPoint(&S); 4025 emitMasked(*this, S); 4026 } 4027 4028 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 4029 if (CGM.getLangOpts().OpenMPIRBuilder) { 4030 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 4031 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 4032 4033 const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt(); 4034 const Expr *Hint = nullptr; 4035 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 4036 Hint = HintClause->getHint(); 4037 4038 // TODO: This is slightly different from what's currently being done in 4039 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 4040 // about typing is final. 
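    // For example (a sketch): for '#pragma omp critical (lck)
    // hint(omp_sync_hint_contended)' the constant hint expression is
    // evaluated and cast to i32 before being handed to createCritical()
    // below.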
4041 llvm::Value *HintInst = nullptr; 4042 if (Hint) 4043 HintInst = 4044 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 4045 4046 auto FiniCB = [this](InsertPointTy IP) { 4047 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4048 }; 4049 4050 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 4051 InsertPointTy CodeGenIP, 4052 llvm::BasicBlock &FiniBB) { 4053 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4054 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 4055 CodeGenIP, FiniBB); 4056 }; 4057 4058 LexicalScope Scope(*this, S.getSourceRange()); 4059 EmitStopPoint(&S); 4060 Builder.restoreIP(OMPBuilder.createCritical( 4061 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 4062 HintInst)); 4063 4064 return; 4065 } 4066 4067 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4068 Action.Enter(CGF); 4069 CGF.EmitStmt(S.getAssociatedStmt()); 4070 }; 4071 const Expr *Hint = nullptr; 4072 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 4073 Hint = HintClause->getHint(); 4074 LexicalScope Scope(*this, S.getSourceRange()); 4075 EmitStopPoint(&S); 4076 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 4077 S.getDirectiveName().getAsString(), 4078 CodeGen, S.getBeginLoc(), Hint); 4079 } 4080 4081 void CodeGenFunction::EmitOMPParallelForDirective( 4082 const OMPParallelForDirective &S) { 4083 // Emit directive as a combined directive that consists of two implicit 4084 // directives: 'parallel' with 'for' directive. 4085 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4086 Action.Enter(CGF); 4087 (void)emitWorksharingDirective(CGF, S, S.hasCancel()); 4088 }; 4089 { 4090 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 4091 [](const OMPReductionClause *C) { 4092 return C->getModifier() == OMPC_REDUCTION_inscan; 4093 })) { 4094 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 4095 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 4096 CGCapturedStmtInfo CGSI(CR_OpenMP); 4097 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI); 4098 OMPLoopScope LoopScope(CGF, S); 4099 return CGF.EmitScalarExpr(S.getNumIterations()); 4100 }; 4101 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen); 4102 } 4103 auto LPCRegion = 4104 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4105 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 4106 emitEmptyBoundParameters); 4107 } 4108 // Check for outer lastprivate conditional update. 4109 checkForLastprivateConditionalUpdate(*this, S); 4110 } 4111 4112 void CodeGenFunction::EmitOMPParallelForSimdDirective( 4113 const OMPParallelForSimdDirective &S) { 4114 // Emit directive as a combined directive that consists of two implicit 4115 // directives: 'parallel' with 'for' directive. 
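  // Roughly (a sketch of the equivalent nesting, not a literal
  // source-to-source transform):
  //
  //   #pragma omp parallel
  //   {
  //     #pragma omp for simd
  //     for (...) ...
  //   }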
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
  };
  {
    if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
                     [](const OMPReductionClause *C) {
                       return C->getModifier() == OMPC_REDUCTION_inscan;
                     })) {
      const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
        CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
        CGCapturedStmtInfo CGSI(CR_OpenMP);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
        OMPLoopScope LoopScope(CGF, S);
        return CGF.EmitScalarExpr(S.getNumIterations());
      };
      emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
    }
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelMasterDirective(
    const OMPParallelMasterDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'master' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // when propagating the master thread's values of threadprivate
      // variables to the local instances of those variables in all other
      // implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    emitMaster(CGF, S);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitSections(S);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

namespace {
/// Get the list of variables declared in the context of the untied tasks.
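/// For example (a sketch):
/// \code
///   #pragma omp task untied
///   {
///     int x = 0;
///     ...
///   }
/// \endcode
/// Here 'x' must live in task-private storage, because an untied task may
/// resume on a different thread after a task scheduling point, so the
/// original stack frame cannot be relied upon.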
class CheckVarsEscapingUntiedTaskDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
  llvm::SmallVector<const VarDecl *, 4> PrivateDecls;

public:
  explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
  virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    // Only local vars need to be privatized; static locals can be processed
    // as is.
    for (const Decl *D : S->decls()) {
      if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
        if (VD->hasLocalStorage())
          PrivateDecls.push_back(VD);
    }
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
  void VisitCapturedStmt(const CapturedStmt *) { return; }
  void VisitLambdaExpr(const LambdaExpr *) { return; }
  void VisitBlockExpr(const BlockExpr *) { return; }
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the list of private variables collected so far.
  ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
};
} // anonymous namespace

void CodeGenFunction::EmitOMPTaskBasedDirective(
    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
    OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  // Check if the task is final.
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    const Expr *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
  // Check if the task has a 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(/*IntVal=*/true);
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >= 0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
4281 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4282 auto IRef = C->varlist_begin(); 4283 auto IElemInitRef = C->inits().begin(); 4284 for (const Expr *IInit : C->private_copies()) { 4285 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4286 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4287 Data.FirstprivateVars.push_back(*IRef); 4288 Data.FirstprivateCopies.push_back(IInit); 4289 Data.FirstprivateInits.push_back(*IElemInitRef); 4290 } 4291 ++IRef; 4292 ++IElemInitRef; 4293 } 4294 } 4295 // Get list of lastprivate variables (for taskloops). 4296 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 4297 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 4298 auto IRef = C->varlist_begin(); 4299 auto ID = C->destination_exprs().begin(); 4300 for (const Expr *IInit : C->private_copies()) { 4301 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4302 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4303 Data.LastprivateVars.push_back(*IRef); 4304 Data.LastprivateCopies.push_back(IInit); 4305 } 4306 LastprivateDstsOrigs.insert( 4307 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 4308 cast<DeclRefExpr>(*IRef)}); 4309 ++IRef; 4310 ++ID; 4311 } 4312 } 4313 SmallVector<const Expr *, 4> LHSs; 4314 SmallVector<const Expr *, 4> RHSs; 4315 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 4316 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4317 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4318 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4319 Data.ReductionOps.append(C->reduction_ops().begin(), 4320 C->reduction_ops().end()); 4321 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4322 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4323 } 4324 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 4325 *this, S.getBeginLoc(), LHSs, RHSs, Data); 4326 // Build list of dependences. 4327 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4328 OMPTaskDataTy::DependData &DD = 4329 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4330 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4331 } 4332 // Get list of local vars for untied tasks. 4333 if (!Data.Tied) { 4334 CheckVarsEscapingUntiedTaskDeclContext Checker; 4335 Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt()); 4336 Data.PrivateLocals.append(Checker.getPrivateDecls().begin(), 4337 Checker.getPrivateDecls().end()); 4338 } 4339 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 4340 CapturedRegion](CodeGenFunction &CGF, 4341 PrePostActionTy &Action) { 4342 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> 4343 UntiedLocalVars; 4344 // Set proper addresses for generated private copies. 4345 OMPPrivateScope Scope(CGF); 4346 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 4347 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 4348 !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) { 4349 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4350 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4351 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4352 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4353 CS->getCapturedDecl()->getParam(PrivatesParam))); 4354 // Map privates. 
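// Rough sketch of the call emitted below (all names hypothetical):
//   void *a_priv, *b_priv;
//   copy_fn(privates_struct, &a_priv, &b_priv, ...);
// i.e. the task's copy function fills in one out-pointer per privatized
// variable; the loops that follow allocate those out-pointer temporaries.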
4355 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4356 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4357 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4358 CallArgs.push_back(PrivatesPtr); 4359 ParamTypes.push_back(PrivatesPtr->getType()); 4360 for (const Expr *E : Data.PrivateVars) { 4361 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4362 Address PrivatePtr = CGF.CreateMemTemp( 4363 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 4364 PrivatePtrs.emplace_back(VD, PrivatePtr); 4365 CallArgs.push_back(PrivatePtr.getPointer()); 4366 ParamTypes.push_back(PrivatePtr.getType()); 4367 } 4368 for (const Expr *E : Data.FirstprivateVars) { 4369 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4370 Address PrivatePtr = 4371 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4372 ".firstpriv.ptr.addr"); 4373 PrivatePtrs.emplace_back(VD, PrivatePtr); 4374 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 4375 CallArgs.push_back(PrivatePtr.getPointer()); 4376 ParamTypes.push_back(PrivatePtr.getType()); 4377 } 4378 for (const Expr *E : Data.LastprivateVars) { 4379 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4380 Address PrivatePtr = 4381 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4382 ".lastpriv.ptr.addr"); 4383 PrivatePtrs.emplace_back(VD, PrivatePtr); 4384 CallArgs.push_back(PrivatePtr.getPointer()); 4385 ParamTypes.push_back(PrivatePtr.getType()); 4386 } 4387 for (const VarDecl *VD : Data.PrivateLocals) { 4388 QualType Ty = VD->getType().getNonReferenceType(); 4389 if (VD->getType()->isLValueReferenceType()) 4390 Ty = CGF.getContext().getPointerType(Ty); 4391 if (isAllocatableDecl(VD)) 4392 Ty = CGF.getContext().getPointerType(Ty); 4393 Address PrivatePtr = CGF.CreateMemTemp( 4394 CGF.getContext().getPointerType(Ty), ".local.ptr.addr"); 4395 UntiedLocalVars.try_emplace(VD, PrivatePtr, Address::invalid()); 4396 CallArgs.push_back(PrivatePtr.getPointer()); 4397 ParamTypes.push_back(PrivatePtr.getType()); 4398 } 4399 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4400 ParamTypes, /*isVarArg=*/false); 4401 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4402 CopyFn, CopyFnTy->getPointerTo()); 4403 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4404 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4405 for (const auto &Pair : LastprivateDstsOrigs) { 4406 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 4407 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 4408 /*RefersToEnclosingVariableOrCapture=*/ 4409 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 4410 Pair.second->getType(), VK_LValue, 4411 Pair.second->getExprLoc()); 4412 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 4413 return CGF.EmitLValue(&DRE).getAddress(CGF); 4414 }); 4415 } 4416 for (const auto &Pair : PrivatePtrs) { 4417 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4418 CGF.getContext().getDeclAlign(Pair.first)); 4419 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4420 } 4421 // Adjust mapping for internal locals by mapping actual memory instead of 4422 // a pointer to this memory. 
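// Sketch, assuming an allocatable local: the copy function hands back the
// address of the variable's pointer, so two loads are needed:
//   T **slot = load(.local.ptr.addr); // address of the allocated pointer
//   T *mem = load(slot);              // the actual memory
// For ordinary locals a single load yields the usable address.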
4423 for (auto &Pair : UntiedLocalVars) {
4424 if (isAllocatableDecl(Pair.first)) {
4425 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4426 Address Replacement(Ptr, CGF.getPointerAlign());
4427 Pair.getSecond().first = Replacement;
4428 Ptr = CGF.Builder.CreateLoad(Replacement);
4429 Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
4430 Pair.getSecond().second = Replacement;
4431 } else {
4432 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4433 Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
4434 Pair.getSecond().first = Replacement;
4435 }
4436 }
4437 }
4438 if (Data.Reductions) {
4439 OMPPrivateScope FirstprivateScope(CGF);
4440 for (const auto &Pair : FirstprivatePtrs) {
4441 Address Replacement(CGF.Builder.CreateLoad(Pair.second),
4442 CGF.getContext().getDeclAlign(Pair.first));
4443 FirstprivateScope.addPrivate(Pair.first,
4444 [Replacement]() { return Replacement; });
4445 }
4446 (void)FirstprivateScope.Privatize();
4447 OMPLexicalScope LexScope(CGF, S, CapturedRegion);
4448 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
4449 Data.ReductionCopies, Data.ReductionOps);
4450 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
4451 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
4452 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
4453 RedCG.emitSharedOrigLValue(CGF, Cnt);
4454 RedCG.emitAggregateType(CGF, Cnt);
4455 // FIXME: This must be removed once the runtime library is fixed.
4456 // Emit required threadprivate variables for
4457 // initializer/combiner/finalizer.
4458 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4459 RedCG, Cnt);
4460 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4461 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4462 Replacement =
4463 Address(CGF.EmitScalarConversion(
4464 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4465 CGF.getContext().getPointerType(
4466 Data.ReductionCopies[Cnt]->getType()),
4467 Data.ReductionCopies[Cnt]->getExprLoc()),
4468 Replacement.getAlignment());
4469 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4470 Scope.addPrivate(RedCG.getBaseDecl(Cnt),
4471 [Replacement]() { return Replacement; });
4472 }
4473 }
4474 // Privatize all private variables except for in_reduction items.
4475 (void)Scope.Privatize();
4476 SmallVector<const Expr *, 4> InRedVars;
4477 SmallVector<const Expr *, 4> InRedPrivs;
4478 SmallVector<const Expr *, 4> InRedOps;
4479 SmallVector<const Expr *, 4> TaskgroupDescriptors;
4480 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
4481 auto IPriv = C->privates().begin();
4482 auto IRed = C->reduction_ops().begin();
4483 auto ITD = C->taskgroup_descriptors().begin();
4484 for (const Expr *Ref : C->varlists()) {
4485 InRedVars.emplace_back(Ref);
4486 InRedPrivs.emplace_back(*IPriv);
4487 InRedOps.emplace_back(*IRed);
4488 TaskgroupDescriptors.emplace_back(*ITD);
4489 std::advance(IPriv, 1);
4490 std::advance(IRed, 1);
4491 std::advance(ITD, 1);
4492 }
4493 }
4494 // Privatize in_reduction items here, because taskgroup descriptors must be
4495 // privatized earlier.
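// Illustrative source (hedged, not from this file):
//   #pragma omp taskgroup task_reduction(+ : x)
//   {
//     #pragma omp task in_reduction(+ : x)
//     x += f();
//   }
// The task locates its private copy of 'x' through the enclosing
// taskgroup's reduction descriptor, which is why that descriptor has to
// be privatized before the in_reduction items handled below.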
4496 OMPPrivateScope InRedScope(CGF);
4497 if (!InRedVars.empty()) {
4498 ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
4499 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
4500 RedCG.emitSharedOrigLValue(CGF, Cnt);
4501 RedCG.emitAggregateType(CGF, Cnt);
4502 // The taskgroup descriptor variable is always implicitly firstprivate
4503 // and privatized already during processing of the firstprivates.
4504 // FIXME: This must be removed once the runtime library is fixed.
4505 // Emit required threadprivate variables for
4506 // initializer/combiner/finalizer.
4507 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4508 RedCG, Cnt);
4509 llvm::Value *ReductionsPtr;
4510 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
4511 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
4512 TRExpr->getExprLoc());
4513 } else {
4514 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4515 }
4516 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4517 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4518 Replacement = Address(
4519 CGF.EmitScalarConversion(
4520 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4521 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
4522 InRedPrivs[Cnt]->getExprLoc()),
4523 Replacement.getAlignment());
4524 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4525 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
4526 [Replacement]() { return Replacement; });
4527 }
4528 }
4529 (void)InRedScope.Privatize();
4530
4531 CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
4532 UntiedLocalVars);
4533 Action.Enter(CGF);
4534 BodyGen(CGF);
4535 };
4536 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
4537 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
4538 Data.NumberOfParts);
4539 OMPLexicalScope Scope(*this, S, llvm::None,
4540 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
4541 !isOpenMPSimdDirective(S.getDirectiveKind()));
4542 TaskGen(*this, OutlinedFn, Data);
4543 }
4544
4545 static ImplicitParamDecl *
4546 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
4547 QualType Ty, CapturedDecl *CD,
4548 SourceLocation Loc) {
4549 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4550 ImplicitParamDecl::Other);
4551 auto *OrigRef = DeclRefExpr::Create(
4552 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
4553 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4554 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
4555 ImplicitParamDecl::Other);
4556 auto *PrivateRef = DeclRefExpr::Create(
4557 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
4558 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
4559 QualType ElemType = C.getBaseElementType(Ty);
4560 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
4561 ImplicitParamDecl::Other);
4562 auto *InitRef = DeclRefExpr::Create(
4563 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
4564 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
4565 PrivateVD->setInitStyle(VarDecl::CInit);
4566 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
4567 InitRef, /*BasePath=*/nullptr,
4568 VK_RValue, FPOptionsOverride()));
4569 Data.FirstprivateVars.emplace_back(OrigRef);
4570 Data.FirstprivateCopies.emplace_back(PrivateRef);
4571
Data.FirstprivateInits.emplace_back(InitRef); 4572 return OrigVD; 4573 } 4574 4575 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 4576 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 4577 OMPTargetDataInfo &InputInfo) { 4578 // Emit outlined function for task construct. 4579 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4580 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4581 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4582 auto I = CS->getCapturedDecl()->param_begin(); 4583 auto PartId = std::next(I); 4584 auto TaskT = std::next(I, 4); 4585 OMPTaskDataTy Data; 4586 // The task is not final. 4587 Data.Final.setInt(/*IntVal=*/false); 4588 // Get list of firstprivate variables. 4589 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4590 auto IRef = C->varlist_begin(); 4591 auto IElemInitRef = C->inits().begin(); 4592 for (auto *IInit : C->private_copies()) { 4593 Data.FirstprivateVars.push_back(*IRef); 4594 Data.FirstprivateCopies.push_back(IInit); 4595 Data.FirstprivateInits.push_back(*IElemInitRef); 4596 ++IRef; 4597 ++IElemInitRef; 4598 } 4599 } 4600 OMPPrivateScope TargetScope(*this); 4601 VarDecl *BPVD = nullptr; 4602 VarDecl *PVD = nullptr; 4603 VarDecl *SVD = nullptr; 4604 VarDecl *MVD = nullptr; 4605 if (InputInfo.NumberOfTargetItems > 0) { 4606 auto *CD = CapturedDecl::Create( 4607 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4608 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4609 QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType( 4610 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4611 /*IndexTypeQuals=*/0); 4612 BPVD = createImplicitFirstprivateForType( 4613 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4614 PVD = createImplicitFirstprivateForType( 4615 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4616 QualType SizesType = getContext().getConstantArrayType( 4617 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4618 ArrSize, nullptr, ArrayType::Normal, 4619 /*IndexTypeQuals=*/0); 4620 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4621 S.getBeginLoc()); 4622 TargetScope.addPrivate( 4623 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 4624 TargetScope.addPrivate(PVD, 4625 [&InputInfo]() { return InputInfo.PointersArray; }); 4626 TargetScope.addPrivate(SVD, 4627 [&InputInfo]() { return InputInfo.SizesArray; }); 4628 // If there is no user-defined mapper, the mapper array will be nullptr. In 4629 // this case, we don't need to privatize it. 4630 if (!dyn_cast_or_null<llvm::ConstantPointerNull>( 4631 InputInfo.MappersArray.getPointer())) { 4632 MVD = createImplicitFirstprivateForType( 4633 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4634 TargetScope.addPrivate(MVD, 4635 [&InputInfo]() { return InputInfo.MappersArray; }); 4636 } 4637 } 4638 (void)TargetScope.Privatize(); 4639 // Build list of dependences. 4640 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4641 OMPTaskDataTy::DependData &DD = 4642 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4643 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4644 } 4645 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD, 4646 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4647 // Set proper addresses for generated private copies. 
4648 OMPPrivateScope Scope(CGF); 4649 if (!Data.FirstprivateVars.empty()) { 4650 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4651 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4652 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4653 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4654 CS->getCapturedDecl()->getParam(PrivatesParam))); 4655 // Map privates. 4656 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4657 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4658 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4659 CallArgs.push_back(PrivatesPtr); 4660 ParamTypes.push_back(PrivatesPtr->getType()); 4661 for (const Expr *E : Data.FirstprivateVars) { 4662 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4663 Address PrivatePtr = 4664 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4665 ".firstpriv.ptr.addr"); 4666 PrivatePtrs.emplace_back(VD, PrivatePtr); 4667 CallArgs.push_back(PrivatePtr.getPointer()); 4668 ParamTypes.push_back(PrivatePtr.getType()); 4669 } 4670 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4671 ParamTypes, /*isVarArg=*/false); 4672 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4673 CopyFn, CopyFnTy->getPointerTo()); 4674 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4675 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4676 for (const auto &Pair : PrivatePtrs) { 4677 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4678 CGF.getContext().getDeclAlign(Pair.first)); 4679 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4680 } 4681 } 4682 // Privatize all private variables except for in_reduction items. 4683 (void)Scope.Privatize(); 4684 if (InputInfo.NumberOfTargetItems > 0) { 4685 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4686 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4687 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4688 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4689 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4690 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4691 // If MVD is nullptr, the mapper array is not privatized 4692 if (MVD) 4693 InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP( 4694 CGF.GetAddrOfLocalVar(MVD), /*Index=*/0); 4695 } 4696 4697 Action.Enter(CGF); 4698 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4699 BodyGen(CGF); 4700 }; 4701 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4702 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4703 Data.NumberOfParts); 4704 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4705 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4706 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4707 SourceLocation()); 4708 4709 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4710 SharedsTy, CapturedStruct, &IfCond, Data); 4711 } 4712 4713 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4714 // Emit outlined function for task construct. 
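// A typical directive handled here (hedged example, not from this file):
//   #pragma omp task if(task : cond) untied
//   { work(); }
// Below, the 'if' condition is honored only for the 'task' name modifier
// (or no modifier), and an 'untied' clause clears Data.Tied.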
4715 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4716 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4717 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4718 const Expr *IfCond = nullptr; 4719 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4720 if (C->getNameModifier() == OMPD_unknown || 4721 C->getNameModifier() == OMPD_task) { 4722 IfCond = C->getCondition(); 4723 break; 4724 } 4725 } 4726 4727 OMPTaskDataTy Data; 4728 // Check if we should emit tied or untied task. 4729 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4730 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4731 CGF.EmitStmt(CS->getCapturedStmt()); 4732 }; 4733 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4734 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4735 const OMPTaskDataTy &Data) { 4736 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4737 SharedsTy, CapturedStruct, IfCond, 4738 Data); 4739 }; 4740 auto LPCRegion = 4741 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4742 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4743 } 4744 4745 void CodeGenFunction::EmitOMPTaskyieldDirective( 4746 const OMPTaskyieldDirective &S) { 4747 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4748 } 4749 4750 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4751 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4752 } 4753 4754 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4755 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 4756 } 4757 4758 void CodeGenFunction::EmitOMPTaskgroupDirective( 4759 const OMPTaskgroupDirective &S) { 4760 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4761 Action.Enter(CGF); 4762 if (const Expr *E = S.getReductionRef()) { 4763 SmallVector<const Expr *, 4> LHSs; 4764 SmallVector<const Expr *, 4> RHSs; 4765 OMPTaskDataTy Data; 4766 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 4767 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4768 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4769 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4770 Data.ReductionOps.append(C->reduction_ops().begin(), 4771 C->reduction_ops().end()); 4772 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4773 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4774 } 4775 llvm::Value *ReductionDesc = 4776 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 4777 LHSs, RHSs, Data); 4778 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4779 CGF.EmitVarDecl(*VD); 4780 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 4781 /*Volatile=*/false, E->getType()); 4782 } 4783 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4784 }; 4785 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4786 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 4787 } 4788 4789 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 4790 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 4791 ? 
llvm::AtomicOrdering::NotAtomic 4792 : llvm::AtomicOrdering::AcquireRelease; 4793 CGM.getOpenMPRuntime().emitFlush( 4794 *this, 4795 [&S]() -> ArrayRef<const Expr *> { 4796 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 4797 return llvm::makeArrayRef(FlushClause->varlist_begin(), 4798 FlushClause->varlist_end()); 4799 return llvm::None; 4800 }(), 4801 S.getBeginLoc(), AO); 4802 } 4803 4804 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 4805 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 4806 LValue DOLVal = EmitLValue(DO->getDepobj()); 4807 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 4808 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 4809 DC->getModifier()); 4810 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 4811 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 4812 *this, Dependencies, DC->getBeginLoc()); 4813 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 4814 return; 4815 } 4816 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 4817 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 4818 return; 4819 } 4820 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 4821 CGM.getOpenMPRuntime().emitUpdateClause( 4822 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 4823 return; 4824 } 4825 } 4826 4827 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 4828 if (!OMPParentLoopDirectiveForScan) 4829 return; 4830 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 4831 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 4832 SmallVector<const Expr *, 4> Shareds; 4833 SmallVector<const Expr *, 4> Privates; 4834 SmallVector<const Expr *, 4> LHSs; 4835 SmallVector<const Expr *, 4> RHSs; 4836 SmallVector<const Expr *, 4> ReductionOps; 4837 SmallVector<const Expr *, 4> CopyOps; 4838 SmallVector<const Expr *, 4> CopyArrayTemps; 4839 SmallVector<const Expr *, 4> CopyArrayElems; 4840 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 4841 if (C->getModifier() != OMPC_REDUCTION_inscan) 4842 continue; 4843 Shareds.append(C->varlist_begin(), C->varlist_end()); 4844 Privates.append(C->privates().begin(), C->privates().end()); 4845 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4846 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4847 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 4848 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 4849 CopyArrayTemps.append(C->copy_array_temps().begin(), 4850 C->copy_array_temps().end()); 4851 CopyArrayElems.append(C->copy_array_elems().begin(), 4852 C->copy_array_elems().end()); 4853 } 4854 if (ParentDir.getDirectiveKind() == OMPD_simd || 4855 (getLangOpts().OpenMPSimd && 4856 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 4857 // For simd directive and simd-based directives in simd only mode, use the 4858 // following codegen: 4859 // int x = 0; 4860 // #pragma omp simd reduction(inscan, +: x) 4861 // for (..) { 4862 // <first part> 4863 // #pragma omp scan inclusive(x) 4864 // <second part> 4865 // } 4866 // is transformed to: 4867 // int x = 0; 4868 // for (..) { 4869 // int x_priv = 0; 4870 // <first part> 4871 // x = x_priv + x; 4872 // x_priv = x; 4873 // <second part> 4874 // } 4875 // and 4876 // int x = 0; 4877 // #pragma omp simd reduction(inscan, +: x) 4878 // for (..) 
{ 4879 // <first part> 4880 // #pragma omp scan exclusive(x) 4881 // <second part> 4882 // } 4883 // to 4884 // int x = 0; 4885 // for (..) { 4886 // int x_priv = 0; 4887 // <second part> 4888 // int temp = x; 4889 // x = x_priv + x; 4890 // x_priv = temp; 4891 // <first part> 4892 // } 4893 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 4894 EmitBranch(IsInclusive 4895 ? OMPScanReduce 4896 : BreakContinueStack.back().ContinueBlock.getBlock()); 4897 EmitBlock(OMPScanDispatch); 4898 { 4899 // New scope for correct construction/destruction of temp variables for 4900 // exclusive scan. 4901 LexicalScope Scope(*this, S.getSourceRange()); 4902 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 4903 EmitBlock(OMPScanReduce); 4904 if (!IsInclusive) { 4905 // Create temp var and copy LHS value to this temp value. 4906 // TMP = LHS; 4907 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4908 const Expr *PrivateExpr = Privates[I]; 4909 const Expr *TempExpr = CopyArrayTemps[I]; 4910 EmitAutoVarDecl( 4911 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 4912 LValue DestLVal = EmitLValue(TempExpr); 4913 LValue SrcLVal = EmitLValue(LHSs[I]); 4914 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4915 SrcLVal.getAddress(*this), 4916 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4917 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4918 CopyOps[I]); 4919 } 4920 } 4921 CGM.getOpenMPRuntime().emitReduction( 4922 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 4923 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 4924 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4925 const Expr *PrivateExpr = Privates[I]; 4926 LValue DestLVal; 4927 LValue SrcLVal; 4928 if (IsInclusive) { 4929 DestLVal = EmitLValue(RHSs[I]); 4930 SrcLVal = EmitLValue(LHSs[I]); 4931 } else { 4932 const Expr *TempExpr = CopyArrayTemps[I]; 4933 DestLVal = EmitLValue(RHSs[I]); 4934 SrcLVal = EmitLValue(TempExpr); 4935 } 4936 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4937 SrcLVal.getAddress(*this), 4938 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4939 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4940 CopyOps[I]); 4941 } 4942 } 4943 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 4944 OMPScanExitBlock = IsInclusive 4945 ? BreakContinueStack.back().ContinueBlock.getBlock() 4946 : OMPScanReduce; 4947 EmitBlock(OMPAfterScanBlock); 4948 return; 4949 } 4950 if (!IsInclusive) { 4951 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4952 EmitBlock(OMPScanExitBlock); 4953 } 4954 if (OMPFirstScanLoop) { 4955 // Emit buffer[i] = red; at the end of the input phase. 
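// Roughly, for 'reduction(inscan, + : x)' this stores (sketch):
//   buffer[iv] = x_priv;
// with 'iv' the loop's iteration variable, cast to the platform size type
// below.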
4956 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4957 .getIterationVariable() 4958 ->IgnoreParenImpCasts(); 4959 LValue IdxLVal = EmitLValue(IVExpr); 4960 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4961 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4962 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4963 const Expr *PrivateExpr = Privates[I]; 4964 const Expr *OrigExpr = Shareds[I]; 4965 const Expr *CopyArrayElem = CopyArrayElems[I]; 4966 OpaqueValueMapping IdxMapping( 4967 *this, 4968 cast<OpaqueValueExpr>( 4969 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4970 RValue::get(IdxVal)); 4971 LValue DestLVal = EmitLValue(CopyArrayElem); 4972 LValue SrcLVal = EmitLValue(OrigExpr); 4973 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4974 SrcLVal.getAddress(*this), 4975 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4976 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4977 CopyOps[I]); 4978 } 4979 } 4980 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4981 if (IsInclusive) { 4982 EmitBlock(OMPScanExitBlock); 4983 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4984 } 4985 EmitBlock(OMPScanDispatch); 4986 if (!OMPFirstScanLoop) { 4987 // Emit red = buffer[i]; at the entrance to the scan phase. 4988 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4989 .getIterationVariable() 4990 ->IgnoreParenImpCasts(); 4991 LValue IdxLVal = EmitLValue(IVExpr); 4992 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4993 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4994 llvm::BasicBlock *ExclusiveExitBB = nullptr; 4995 if (!IsInclusive) { 4996 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 4997 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 4998 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 4999 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 5000 EmitBlock(ContBB); 5001 // Use idx - 1 iteration for exclusive scan. 5002 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 5003 } 5004 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 5005 const Expr *PrivateExpr = Privates[I]; 5006 const Expr *OrigExpr = Shareds[I]; 5007 const Expr *CopyArrayElem = CopyArrayElems[I]; 5008 OpaqueValueMapping IdxMapping( 5009 *this, 5010 cast<OpaqueValueExpr>( 5011 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 5012 RValue::get(IdxVal)); 5013 LValue SrcLVal = EmitLValue(CopyArrayElem); 5014 LValue DestLVal = EmitLValue(OrigExpr); 5015 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 5016 SrcLVal.getAddress(*this), 5017 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 5018 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 5019 CopyOps[I]); 5020 } 5021 if (!IsInclusive) { 5022 EmitBlock(ExclusiveExitBB); 5023 } 5024 } 5025 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 5026 : OMPAfterScanBlock); 5027 EmitBlock(OMPAfterScanBlock); 5028 } 5029 5030 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 5031 const CodeGenLoopTy &CodeGenLoop, 5032 Expr *IncExpr) { 5033 // Emit the loop iteration variable. 5034 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 5035 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 5036 EmitVarDecl(*IVDecl); 5037 5038 // Emit the iterations count variable. 
5039 // If it is not a variable, Sema decided to calculate the iteration count on
5040 // each use (e.g., it is foldable into a constant).
5041 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
5042 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
5043 // Emit calculation of the iterations count.
5044 EmitIgnoredExpr(S.getCalcLastIteration());
5045 }
5046
5047 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
5048
5049 bool HasLastprivateClause = false;
5050 // Check pre-condition.
5051 {
5052 OMPLoopScope PreInitScope(*this, S);
5053 // Skip the entire loop if we don't meet the precondition.
5054 // If the condition constant folds and can be elided, avoid emitting the
5055 // whole loop.
5056 bool CondConstant;
5057 llvm::BasicBlock *ContBlock = nullptr;
5058 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
5059 if (!CondConstant)
5060 return;
5061 } else {
5062 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
5063 ContBlock = createBasicBlock("omp.precond.end");
5064 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
5065 getProfileCount(&S));
5066 EmitBlock(ThenBlock);
5067 incrementProfileCounter(&S);
5068 }
5069
5070 emitAlignedClause(*this, S);
5071 // Emit 'then' code.
5072 {
5073 // Emit helper vars inits.
5074
5075 LValue LB = EmitOMPHelperVar(
5076 *this, cast<DeclRefExpr>(
5077 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5078 ? S.getCombinedLowerBoundVariable()
5079 : S.getLowerBoundVariable())));
5080 LValue UB = EmitOMPHelperVar(
5081 *this, cast<DeclRefExpr>(
5082 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5083 ? S.getCombinedUpperBoundVariable()
5084 : S.getUpperBoundVariable())));
5085 LValue ST =
5086 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
5087 LValue IL =
5088 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
5089
5090 OMPPrivateScope LoopScope(*this);
5091 if (EmitOMPFirstprivateClause(S, LoopScope)) {
5092 // Emit implicit barrier to synchronize threads and avoid data races
5093 // on initialization of firstprivate variables and post-update of
5094 // lastprivate variables.
5095 CGM.getOpenMPRuntime().emitBarrierCall(
5096 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
5097 /*ForceSimpleCall=*/true);
5098 }
5099 EmitOMPPrivateClause(S, LoopScope);
5100 if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
5101 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
5102 !isOpenMPTeamsDirective(S.getDirectiveKind()))
5103 EmitOMPReductionClauseInit(S, LoopScope);
5104 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
5105 EmitOMPPrivateLoopCounters(S, LoopScope);
5106 (void)LoopScope.Privatize();
5107 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
5108 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
5109
5110 // Detect the distribute schedule kind and chunk.
5111 llvm::Value *Chunk = nullptr;
5112 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
5113 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
5114 ScheduleKind = C->getDistScheduleKind();
5115 if (const Expr *Ch = C->getChunkSize()) {
5116 Chunk = EmitScalarExpr(Ch);
5117 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
5118 S.getIterationVariable()->getType(),
5119 S.getBeginLoc());
5120 }
5121 } else {
5122 // Default behaviour for dist_schedule clause.
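// e.g. (illustrative): '#pragma omp distribute dist_schedule(static, 128)'
// takes the branch above with Chunk = 128; with no dist_schedule clause
// the runtime's default kind and chunk are queried here instead.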
5123 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
5124 *this, S, ScheduleKind, Chunk);
5125 }
5126 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
5127 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
5128
5129 // OpenMP [2.10.8, distribute Construct, Description]
5130 // If dist_schedule is specified, kind must be static. If specified,
5131 // iterations are divided into chunks of size chunk_size, chunks are
5132 // assigned to the teams of the league in a round-robin fashion in the
5133 // order of the team number. When no chunk_size is specified, the
5134 // iteration space is divided into chunks that are approximately equal
5135 // in size, and at most one chunk is distributed to each team of the
5136 // league. The size of the chunks is unspecified in this case.
5137 bool StaticChunked = RT.isStaticChunked(
5138 ScheduleKind, /* Chunked */ Chunk != nullptr) &&
5139 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
5140 if (RT.isStaticNonchunked(ScheduleKind,
5141 /* Chunked */ Chunk != nullptr) ||
5142 StaticChunked) {
5143 CGOpenMPRuntime::StaticRTInput StaticInit(
5144 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
5145 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
5146 StaticChunked ? Chunk : nullptr);
5147 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
5148 StaticInit);
5149 JumpDest LoopExit =
5150 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
5151 // UB = min(UB, GlobalUB);
5152 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5153 ? S.getCombinedEnsureUpperBound()
5154 : S.getEnsureUpperBound());
5155 // IV = LB;
5156 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5157 ? S.getCombinedInit()
5158 : S.getInit());
5159
5160 const Expr *Cond =
5161 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5162 ? S.getCombinedCond()
5163 : S.getCond();
5164
5165 if (StaticChunked)
5166 Cond = S.getCombinedDistCond();
5167
5168 // For static unchunked schedules generate:
5169 //
5170 // 1. For distribute alone, codegen
5171 // while (idx <= UB) {
5172 // BODY;
5173 // ++idx;
5174 // }
5175 //
5176 // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
5177 // while (idx <= UB) {
5178 // <CodeGen rest of pragma>(LB, UB);
5179 // idx += ST;
5180 // }
5181 //
5182 // For static chunked schedules generate:
5183 //
5184 // while (IV <= GlobalUB) {
5185 // <CodeGen rest of pragma>(LB, UB);
5186 // LB += ST;
5187 // UB += ST;
5188 // UB = min(UB, GlobalUB);
5189 // IV = LB;
5190 // }
5191 //
5192 emitCommonSimdLoop(
5193 *this, S,
5194 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5195 if (isOpenMPSimdDirective(S.getDirectiveKind()))
5196 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
5197 },
5198 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
5199 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
5200 CGF.EmitOMPInnerLoop(
5201 S, LoopScope.requiresCleanups(), Cond, IncExpr,
5202 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
5203 CodeGenLoop(CGF, S, LoopExit);
5204 },
5205 [&S, StaticChunked](CodeGenFunction &CGF) {
5206 if (StaticChunked) {
5207 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
5208 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
5209 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
5210 CGF.EmitIgnoredExpr(S.getCombinedInit());
5211 }
5212 });
5213 });
5214 EmitBlock(LoopExit.getBlock());
5215 // Tell the runtime we are done.
5216 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 5217 } else { 5218 // Emit the outer loop, which requests its work chunk [LB..UB] from 5219 // runtime and runs the inner loop to process it. 5220 const OMPLoopArguments LoopArguments = { 5221 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5222 IL.getAddress(*this), Chunk}; 5223 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 5224 CodeGenLoop); 5225 } 5226 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 5227 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 5228 return CGF.Builder.CreateIsNotNull( 5229 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5230 }); 5231 } 5232 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5233 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5234 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 5235 EmitOMPReductionClauseFinal(S, OMPD_simd); 5236 // Emit post-update of the reduction variables if IsLastIter != 0. 5237 emitPostUpdateForReductionClause( 5238 *this, S, [IL, &S](CodeGenFunction &CGF) { 5239 return CGF.Builder.CreateIsNotNull( 5240 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5241 }); 5242 } 5243 // Emit final copy of the lastprivate variables if IsLastIter != 0. 5244 if (HasLastprivateClause) { 5245 EmitOMPLastprivateClauseFinal( 5246 S, /*NoFinals=*/false, 5247 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 5248 } 5249 } 5250 5251 // We're now done with the loop, so jump to the continuation block. 5252 if (ContBlock) { 5253 EmitBranch(ContBlock); 5254 EmitBlock(ContBlock, true); 5255 } 5256 } 5257 } 5258 5259 void CodeGenFunction::EmitOMPDistributeDirective( 5260 const OMPDistributeDirective &S) { 5261 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5262 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5263 }; 5264 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5265 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 5266 } 5267 5268 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 5269 const CapturedStmt *S, 5270 SourceLocation Loc) { 5271 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 5272 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 5273 CGF.CapturedStmtInfo = &CapStmtInfo; 5274 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 5275 Fn->setDoesNotRecurse(); 5276 return Fn; 5277 } 5278 5279 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 5280 if (S.hasClausesOfKind<OMPDependClause>()) { 5281 assert(!S.hasAssociatedStmt() && 5282 "No associated statement must be in ordered depend construct."); 5283 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 5284 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 5285 return; 5286 } 5287 const auto *C = S.getSingleClause<OMPSIMDClause>(); 5288 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 5289 PrePostActionTy &Action) { 5290 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 5291 if (C) { 5292 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5293 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5294 llvm::Function *OutlinedFn = 5295 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 5296 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 5297 OutlinedFn, CapturedVars); 5298 } else { 5299 Action.Enter(CGF); 5300 CGF.EmitStmt(CS->getCapturedStmt()); 5301 } 5302 }; 5303 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5304 CGM.getOpenMPRuntime().emitOrderedRegion(*this, 
CodeGen, S.getBeginLoc(), !C); 5305 } 5306 5307 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 5308 QualType SrcType, QualType DestType, 5309 SourceLocation Loc) { 5310 assert(CGF.hasScalarEvaluationKind(DestType) && 5311 "DestType must have scalar evaluation kind."); 5312 assert(!Val.isAggregate() && "Must be a scalar or complex."); 5313 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 5314 DestType, Loc) 5315 : CGF.EmitComplexToScalarConversion( 5316 Val.getComplexVal(), SrcType, DestType, Loc); 5317 } 5318 5319 static CodeGenFunction::ComplexPairTy 5320 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 5321 QualType DestType, SourceLocation Loc) { 5322 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 5323 "DestType must have complex evaluation kind."); 5324 CodeGenFunction::ComplexPairTy ComplexVal; 5325 if (Val.isScalar()) { 5326 // Convert the input element to the element type of the complex. 5327 QualType DestElementType = 5328 DestType->castAs<ComplexType>()->getElementType(); 5329 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 5330 Val.getScalarVal(), SrcType, DestElementType, Loc); 5331 ComplexVal = CodeGenFunction::ComplexPairTy( 5332 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 5333 } else { 5334 assert(Val.isComplex() && "Must be a scalar or complex."); 5335 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 5336 QualType DestElementType = 5337 DestType->castAs<ComplexType>()->getElementType(); 5338 ComplexVal.first = CGF.EmitScalarConversion( 5339 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 5340 ComplexVal.second = CGF.EmitScalarConversion( 5341 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 5342 } 5343 return ComplexVal; 5344 } 5345 5346 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5347 LValue LVal, RValue RVal) { 5348 if (LVal.isGlobalReg()) 5349 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 5350 else 5351 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 5352 } 5353 5354 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 5355 llvm::AtomicOrdering AO, LValue LVal, 5356 SourceLocation Loc) { 5357 if (LVal.isGlobalReg()) 5358 return CGF.EmitLoadOfLValue(LVal, Loc); 5359 return CGF.EmitAtomicLoad( 5360 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 5361 LVal.isVolatile()); 5362 } 5363 5364 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 5365 QualType RValTy, SourceLocation Loc) { 5366 switch (getEvaluationKind(LVal.getType())) { 5367 case TEK_Scalar: 5368 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 5369 *this, RVal, RValTy, LVal.getType(), Loc)), 5370 LVal); 5371 break; 5372 case TEK_Complex: 5373 EmitStoreOfComplex( 5374 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 5375 /*isInit=*/false); 5376 break; 5377 case TEK_Aggregate: 5378 llvm_unreachable("Must be a scalar or complex."); 5379 } 5380 } 5381 5382 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5383 const Expr *X, const Expr *V, 5384 SourceLocation Loc) { 5385 // v = x; 5386 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 5387 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 5388 LValue XLValue = CGF.EmitLValue(X); 5389 LValue VLValue = CGF.EmitLValue(V); 5390 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 5391 // OpenMP, 
2.17.7, atomic Construct
5392 // If the read or capture clause is specified and the acquire, acq_rel, or
5393 // seq_cst clause is specified then the strong flush on exit from the atomic
5394 // operation is also an acquire flush.
5395 switch (AO) {
5396 case llvm::AtomicOrdering::Acquire:
5397 case llvm::AtomicOrdering::AcquireRelease:
5398 case llvm::AtomicOrdering::SequentiallyConsistent:
5399 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
5400 llvm::AtomicOrdering::Acquire);
5401 break;
5402 case llvm::AtomicOrdering::Monotonic:
5403 case llvm::AtomicOrdering::Release:
5404 break;
5405 case llvm::AtomicOrdering::NotAtomic:
5406 case llvm::AtomicOrdering::Unordered:
5407 llvm_unreachable("Unexpected ordering.");
5408 }
5409 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
5410 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
5411 }
5412
5413 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
5414 llvm::AtomicOrdering AO, const Expr *X,
5415 const Expr *E, SourceLocation Loc) {
5416 // x = expr;
5417 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
5418 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
5419 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
5420 // OpenMP, 2.17.7, atomic Construct
5421 // If the write, update, or capture clause is specified and the release,
5422 // acq_rel, or seq_cst clause is specified then the strong flush on entry to
5423 // the atomic operation is also a release flush.
5424 switch (AO) {
5425 case llvm::AtomicOrdering::Release:
5426 case llvm::AtomicOrdering::AcquireRelease:
5427 case llvm::AtomicOrdering::SequentiallyConsistent:
5428 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
5429 llvm::AtomicOrdering::Release);
5430 break;
5431 case llvm::AtomicOrdering::Acquire:
5432 case llvm::AtomicOrdering::Monotonic:
5433 break;
5434 case llvm::AtomicOrdering::NotAtomic:
5435 case llvm::AtomicOrdering::Unordered:
5436 llvm_unreachable("Unexpected ordering.");
5437 }
5438 }
5439
5440 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
5441 RValue Update,
5442 BinaryOperatorKind BO,
5443 llvm::AtomicOrdering AO,
5444 bool IsXLHSInRHSPart) {
5445 ASTContext &Context = CGF.getContext();
5446 // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
5447 // expression for 'x' is simple, and atomics are supported for the given type
5448 // on the target platform.
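// Hedged example: for 'int x', '#pragma omp atomic update' applied to
// 'x += n' can lower directly to an 'atomicrmw add' with the requested
// ordering; if any check below fails, codegen falls back to the generic
// compare-and-exchange path instead.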
5449 if (BO == BO_Comma || !Update.isScalar() ||
5450 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
5451 (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
5452 (Update.getScalarVal()->getType() !=
5453 X.getAddress(CGF).getElementType())) ||
5454 !X.getAddress(CGF).getElementType()->isIntegerTy() ||
5455 !Context.getTargetInfo().hasBuiltinAtomic(
5456 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
5457 return std::make_pair(false, RValue::get(nullptr));
5458
5459 llvm::AtomicRMWInst::BinOp RMWOp;
5460 switch (BO) {
5461 case BO_Add:
5462 RMWOp = llvm::AtomicRMWInst::Add;
5463 break;
5464 case BO_Sub:
5465 if (!IsXLHSInRHSPart)
5466 return std::make_pair(false, RValue::get(nullptr));
5467 RMWOp = llvm::AtomicRMWInst::Sub;
5468 break;
5469 case BO_And:
5470 RMWOp = llvm::AtomicRMWInst::And;
5471 break;
5472 case BO_Or:
5473 RMWOp = llvm::AtomicRMWInst::Or;
5474 break;
5475 case BO_Xor:
5476 RMWOp = llvm::AtomicRMWInst::Xor;
5477 break;
5478 case BO_LT:
5479 RMWOp = X.getType()->hasSignedIntegerRepresentation()
5480 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
5481 : llvm::AtomicRMWInst::Max)
5482 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
5483 : llvm::AtomicRMWInst::UMax);
5484 break;
5485 case BO_GT:
5486 RMWOp = X.getType()->hasSignedIntegerRepresentation()
5487 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
5488 : llvm::AtomicRMWInst::Min)
5489 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
5490 : llvm::AtomicRMWInst::UMin);
5491 break;
5492 case BO_Assign:
5493 RMWOp = llvm::AtomicRMWInst::Xchg;
5494 break;
5495 case BO_Mul:
5496 case BO_Div:
5497 case BO_Rem:
5498 case BO_Shl:
5499 case BO_Shr:
5500 case BO_LAnd:
5501 case BO_LOr:
5502 return std::make_pair(false, RValue::get(nullptr));
5503 case BO_PtrMemD:
5504 case BO_PtrMemI:
5505 case BO_LE:
5506 case BO_GE:
5507 case BO_EQ:
5508 case BO_NE:
5509 case BO_Cmp:
5510 case BO_AddAssign:
5511 case BO_SubAssign:
5512 case BO_AndAssign:
5513 case BO_OrAssign:
5514 case BO_XorAssign:
5515 case BO_MulAssign:
5516 case BO_DivAssign:
5517 case BO_RemAssign:
5518 case BO_ShlAssign:
5519 case BO_ShrAssign:
5520 case BO_Comma:
5521 llvm_unreachable("Unsupported atomic update operation");
5522 }
5523 llvm::Value *UpdateVal = Update.getScalarVal();
5524 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
5525 UpdateVal = CGF.Builder.CreateIntCast(
5526 IC, X.getAddress(CGF).getElementType(),
5527 X.getType()->hasSignedIntegerRepresentation());
5528 }
5529 llvm::Value *Res =
5530 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
5531 return std::make_pair(true, RValue::get(Res));
5532 }
5533
5534 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
5535 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
5536 llvm::AtomicOrdering AO, SourceLocation Loc,
5537 const llvm::function_ref<RValue(RValue)> CommonGen) {
5538 // Update expressions are allowed to have the following forms:
5539 // x binop= expr; -> xrval binop expr;
5540 // x++, ++x -> xrval + 1;
5541 // x--, --x -> xrval - 1;
5542 // x = x binop expr; -> xrval binop expr
5543 // x = expr Op x; -> expr binop xrval;
5544 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
5545 if (!Res.first) {
5546 if (X.isGlobalReg()) {
5547 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
5548 // 'xrval'.
5549 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
5550 } else {
5551 // Perform compare-and-swap procedure.
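// Conceptually (sketch):
//   old = atomic load x;
//   do { desired = CommonGen(old); }
//   while (!atomic compare_exchange(x, &old, desired));
// EmitAtomicUpdate below emits this retry loop with ordering AO.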
5552 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
5553 }
5554 }
5555 return Res;
5556 }
5557
5558 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
5559 llvm::AtomicOrdering AO, const Expr *X,
5560 const Expr *E, const Expr *UE,
5561 bool IsXLHSInRHSPart, SourceLocation Loc) {
5562 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
5563 "Update expr in 'atomic update' must be a binary operator.");
5564 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
5565 // Update expressions are allowed to have the following forms:
5566 // x binop= expr; -> xrval binop expr;
5567 // x++, ++x -> xrval + 1;
5568 // x--, --x -> xrval - 1;
5569 // x = x binop expr; -> xrval binop expr
5570 // x = expr Op x; -> expr binop xrval;
5571 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
5572 LValue XLValue = CGF.EmitLValue(X);
5573 RValue ExprRValue = CGF.EmitAnyExpr(E);
5574 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
5575 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
5576 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
5577 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
5578 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
5579 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
5580 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
5581 return CGF.EmitAnyExpr(UE);
5582 };
5583 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
5584 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
5585 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
5586 // OpenMP, 2.17.7, atomic Construct
5587 // If the write, update, or capture clause is specified and the release,
5588 // acq_rel, or seq_cst clause is specified then the strong flush on entry to
5589 // the atomic operation is also a release flush.
5590 switch (AO) {
5591 case llvm::AtomicOrdering::Release:
5592 case llvm::AtomicOrdering::AcquireRelease:
5593 case llvm::AtomicOrdering::SequentiallyConsistent:
5594 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
5595 llvm::AtomicOrdering::Release);
5596 break;
5597 case llvm::AtomicOrdering::Acquire:
5598 case llvm::AtomicOrdering::Monotonic:
5599 break;
5600 case llvm::AtomicOrdering::NotAtomic:
5601 case llvm::AtomicOrdering::Unordered:
5602 llvm_unreachable("Unexpected ordering.");
5603 }
5604 }
5605
5606 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
5607 QualType SourceType, QualType ResType,
5608 SourceLocation Loc) {
5609 switch (CGF.getEvaluationKind(ResType)) {
5610 case TEK_Scalar:
5611 return RValue::get(
5612 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
5613 case TEK_Complex: {
5614 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
5615 return RValue::getComplex(Res.first, Res.second);
5616 }
5617 case TEK_Aggregate:
5618 break;
5619 }
5620 llvm_unreachable("Must be a scalar or complex.");
5621 }
5622
5623 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
5624 llvm::AtomicOrdering AO,
5625 bool IsPostfixUpdate, const Expr *V,
5626 const Expr *X, const Expr *E,
5627 const Expr *UE, bool IsXLHSInRHSPart,
5628 SourceLocation Loc) {
5629 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
5630 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
5631 RValue NewVVal;
5632 LValue VLValue = CGF.EmitLValue(V);
5633 LValue XLValue = CGF.EmitLValue(X);
5634 RValue ExprRValue = CGF.EmitAnyExpr(E);
5635 QualType NewVValType;
5636 if (UE) {
5637 // 'x' is updated with some additional value.
5638 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
5639 "Update expr in 'atomic capture' must be a binary operator.");
5640 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
5641 // Update expressions are allowed to have the following forms:
5642 // x binop= expr; -> xrval binop expr;
5643 // x++, ++x -> xrval + 1;
5644 // x--, --x -> xrval - 1;
5645 // x = x binop expr; -> xrval binop expr
5646 // x = expr Op x; -> expr binop xrval;
5647 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
5648 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
5649 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
5650 NewVValType = XRValExpr->getType();
5651 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
5652 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
5653 IsPostfixUpdate](RValue XRValue) {
5654 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
5655 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
5656 RValue Res = CGF.EmitAnyExpr(UE);
5657 NewVVal = IsPostfixUpdate ? XRValue : Res;
5658 return Res;
5659 };
5660 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
5661 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
5662 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
5663 if (Res.first) {
5664 // 'atomicrmw' instruction was generated.
5665 if (IsPostfixUpdate) {
5666 // Use old value from 'atomicrmw'.
5667 NewVVal = Res.second;
5668 } else {
5669 // 'atomicrmw' does not provide the new value, so evaluate it using the
5670 // old value of 'x'.
5671 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5672 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5673 NewVVal = CGF.EmitAnyExpr(UE); 5674 } 5675 } 5676 } else { 5677 // 'x' is simply rewritten with some 'expr'. 5678 NewVValType = X->getType().getNonReferenceType(); 5679 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5680 X->getType().getNonReferenceType(), Loc); 5681 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5682 NewVVal = XRValue; 5683 return ExprRValue; 5684 }; 5685 // Try to perform atomicrmw xchg, otherwise simple exchange. 5686 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5687 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5688 Loc, Gen); 5689 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5690 if (Res.first) { 5691 // 'atomicrmw' instruction was generated. 5692 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 5693 } 5694 } 5695 // Emit post-update store to 'v' of old/new 'x' value. 5696 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 5697 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5698 // OpenMP, 2.17.7, atomic Construct 5699 // If the write, update, or capture clause is specified and the release, 5700 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5701 // the atomic operation is also a release flush. 5702 // If the read or capture clause is specified and the acquire, acq_rel, or 5703 // seq_cst clause is specified then the strong flush on exit from the atomic 5704 // operation is also an acquire flush. 5705 switch (AO) { 5706 case llvm::AtomicOrdering::Release: 5707 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5708 llvm::AtomicOrdering::Release); 5709 break; 5710 case llvm::AtomicOrdering::Acquire: 5711 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5712 llvm::AtomicOrdering::Acquire); 5713 break; 5714 case llvm::AtomicOrdering::AcquireRelease: 5715 case llvm::AtomicOrdering::SequentiallyConsistent: 5716 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5717 llvm::AtomicOrdering::AcquireRelease); 5718 break; 5719 case llvm::AtomicOrdering::Monotonic: 5720 break; 5721 case llvm::AtomicOrdering::NotAtomic: 5722 case llvm::AtomicOrdering::Unordered: 5723 llvm_unreachable("Unexpected ordering."); 5724 } 5725 } 5726 5727 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 5728 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 5729 const Expr *X, const Expr *V, const Expr *E, 5730 const Expr *UE, bool IsXLHSInRHSPart, 5731 SourceLocation Loc) { 5732 switch (Kind) { 5733 case OMPC_read: 5734 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 5735 break; 5736 case OMPC_write: 5737 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 5738 break; 5739 case OMPC_unknown: 5740 case OMPC_update: 5741 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 5742 break; 5743 case OMPC_capture: 5744 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 5745 IsXLHSInRHSPart, Loc); 5746 break; 5747 case OMPC_if: 5748 case OMPC_final: 5749 case OMPC_num_threads: 5750 case OMPC_private: 5751 case OMPC_firstprivate: 5752 case OMPC_lastprivate: 5753 case OMPC_reduction: 5754 case OMPC_task_reduction: 5755 case OMPC_in_reduction: 5756 case OMPC_safelen: 5757 case OMPC_simdlen: 5758 case OMPC_sizes: 5759 case OMPC_allocator: 5760 case OMPC_allocate: 5761 case OMPC_collapse: 5762 case OMPC_default: 5763 case OMPC_seq_cst: 5764 case OMPC_acq_rel: 5765 
  case OMPC_acquire:
5766   case OMPC_release:
5767   case OMPC_relaxed:
5768   case OMPC_shared:
5769   case OMPC_linear:
5770   case OMPC_aligned:
5771   case OMPC_copyin:
5772   case OMPC_copyprivate:
5773   case OMPC_flush:
5774   case OMPC_depobj:
5775   case OMPC_proc_bind:
5776   case OMPC_schedule:
5777   case OMPC_ordered:
5778   case OMPC_nowait:
5779   case OMPC_untied:
5780   case OMPC_threadprivate:
5781   case OMPC_depend:
5782   case OMPC_mergeable:
5783   case OMPC_device:
5784   case OMPC_threads:
5785   case OMPC_simd:
5786   case OMPC_map:
5787   case OMPC_num_teams:
5788   case OMPC_thread_limit:
5789   case OMPC_priority:
5790   case OMPC_grainsize:
5791   case OMPC_nogroup:
5792   case OMPC_num_tasks:
5793   case OMPC_hint:
5794   case OMPC_dist_schedule:
5795   case OMPC_defaultmap:
5796   case OMPC_uniform:
5797   case OMPC_to:
5798   case OMPC_from:
5799   case OMPC_use_device_ptr:
5800   case OMPC_use_device_addr:
5801   case OMPC_is_device_ptr:
5802   case OMPC_unified_address:
5803   case OMPC_unified_shared_memory:
5804   case OMPC_reverse_offload:
5805   case OMPC_dynamic_allocators:
5806   case OMPC_atomic_default_mem_order:
5807   case OMPC_device_type:
5808   case OMPC_match:
5809   case OMPC_nontemporal:
5810   case OMPC_order:
5811   case OMPC_destroy:
5812   case OMPC_detach:
5813   case OMPC_inclusive:
5814   case OMPC_exclusive:
5815   case OMPC_uses_allocators:
5816   case OMPC_affinity:
5817   case OMPC_init:
5818   case OMPC_inbranch:
5819   case OMPC_notinbranch:
5820   case OMPC_link:
5821   case OMPC_use:
5822   case OMPC_novariants:
5823   case OMPC_nocontext:
5824   case OMPC_filter:
5825     llvm_unreachable("Clause is not allowed in 'omp atomic'.");
5826   }
5827 }
5828 
5829 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
5830   llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
5831   bool MemOrderingSpecified = false;
5832   if (S.getSingleClause<OMPSeqCstClause>()) {
5833     AO = llvm::AtomicOrdering::SequentiallyConsistent;
5834     MemOrderingSpecified = true;
5835   } else if (S.getSingleClause<OMPAcqRelClause>()) {
5836     AO = llvm::AtomicOrdering::AcquireRelease;
5837     MemOrderingSpecified = true;
5838   } else if (S.getSingleClause<OMPAcquireClause>()) {
5839     AO = llvm::AtomicOrdering::Acquire;
5840     MemOrderingSpecified = true;
5841   } else if (S.getSingleClause<OMPReleaseClause>()) {
5842     AO = llvm::AtomicOrdering::Release;
5843     MemOrderingSpecified = true;
5844   } else if (S.getSingleClause<OMPRelaxedClause>()) {
5845     AO = llvm::AtomicOrdering::Monotonic;
5846     MemOrderingSpecified = true;
5847   }
5848   OpenMPClauseKind Kind = OMPC_unknown;
5849   for (const OMPClause *C : S.clauses()) {
5850     // Find first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause,
5851     // if it is first).
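    // E.g., for '#pragma omp atomic seq_cst capture' (illustrative only) the
    // 'seq_cst' clause is skipped and 'capture' becomes the atomic kind.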
5852     if (C->getClauseKind() != OMPC_seq_cst &&
5853         C->getClauseKind() != OMPC_acq_rel &&
5854         C->getClauseKind() != OMPC_acquire &&
5855         C->getClauseKind() != OMPC_release &&
5856         C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) {
5857       Kind = C->getClauseKind();
5858       break;
5859     }
5860   }
5861   if (!MemOrderingSpecified) {
5862     llvm::AtomicOrdering DefaultOrder =
5863         CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
5864     if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
5865         DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
5866         (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
5867          Kind == OMPC_capture)) {
5868       AO = DefaultOrder;
5869     } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
5870       if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
5871         AO = llvm::AtomicOrdering::Release;
5872       } else if (Kind == OMPC_read) {
5873         AO = llvm::AtomicOrdering::Acquire;
5874       }
5875     }
5876   }
5877 
5878 
5879   LexicalScope Scope(*this, S.getSourceRange());
5880   EmitStopPoint(S.getAssociatedStmt());
5881   emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
5882                     S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
5883                     S.getBeginLoc());
5884 }
5885 
5886 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
5887                                          const OMPExecutableDirective &S,
5888                                          const RegionCodeGenTy &CodeGen) {
5889   assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
5890   CodeGenModule &CGM = CGF.CGM;
5891 
5892   // On device emit this construct as inlined code.
5893   if (CGM.getLangOpts().OpenMPIsDevice) {
5894     OMPLexicalScope Scope(CGF, S, OMPD_target);
5895     CGM.getOpenMPRuntime().emitInlinedDirective(
5896         CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5897           CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5898         });
5899     return;
5900   }
5901 
5902   auto LPCRegion =
5903       CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
5904   llvm::Function *Fn = nullptr;
5905   llvm::Constant *FnID = nullptr;
5906 
5907   const Expr *IfCond = nullptr;
5908   // Check for at most one 'if' clause associated with the target region.
5909   for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
5910     if (C->getNameModifier() == OMPD_unknown ||
5911         C->getNameModifier() == OMPD_target) {
5912       IfCond = C->getCondition();
5913       break;
5914     }
5915   }
5916 
5917   // Check if we have any device clause associated with the directive.
5918   llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
5919       nullptr, OMPC_DEVICE_unknown);
5920   if (auto *C = S.getSingleClause<OMPDeviceClause>())
5921     Device.setPointerAndInt(C->getDevice(), C->getModifier());
5922 
5923   // Check if we have an if clause whose conditional always evaluates to false
5924   // or if we do not have any targets specified. If so the target region is not
5925   // an offload entry point.
5926   bool IsOffloadEntry = true;
5927   if (IfCond) {
5928     bool Val;
5929     if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
5930       IsOffloadEntry = false;
5931   }
5932   if (CGM.getLangOpts().OMPTargetTriples.empty())
5933     IsOffloadEntry = false;
5934 
5935   assert(CGF.CurFuncDecl && "No parent declaration for target region!");
5936   StringRef ParentName;
5937   // In case we have Ctors/Dtors we use the complete type variant to produce
5938   // the mangling of the device outlined kernel.
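  // (Illustrative) For a target region inside 'S::S()', GlobalDecl(D,
  // Ctor_Complete) below selects the complete-object constructor mangling as
  // the parent name of the outlined kernel.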
5939   if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
5940     ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
5941   else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
5942     ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
5943   else
5944     ParentName =
5945         CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
5946 
5947   // Emit target region as a standalone region.
5948   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
5949                                                     IsOffloadEntry, CodeGen);
5950   OMPLexicalScope Scope(CGF, S, OMPD_task);
5951   auto &&SizeEmitter =
5952       [IsOffloadEntry](CodeGenFunction &CGF,
5953                        const OMPLoopDirective &D) -> llvm::Value * {
5954     if (IsOffloadEntry) {
5955       // Name the scope so it stays alive while emitting the iteration count
5956       // (a discarded temporary would be destroyed immediately).
       OMPLoopScope LoopScope(CGF, D);
5956       // Emit calculation of the iterations count.
5957       llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
5958       NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
5959                                                 /*isSigned=*/false);
5960       return NumIterations;
5961     }
5962     return nullptr;
5963   };
5964   CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
5965                                         SizeEmitter);
5966 }
5967 
5968 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
5969                              PrePostActionTy &Action) {
5970   Action.Enter(CGF);
5971   CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5972   (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
5973   CGF.EmitOMPPrivateClause(S, PrivateScope);
5974   (void)PrivateScope.Privatize();
5975   if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
5976     CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
5977 
5978   CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
5979   CGF.EnsureInsertPoint();
5980 }
5981 
5982 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
5983                                                   StringRef ParentName,
5984                                                   const OMPTargetDirective &S) {
5985   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5986     emitTargetRegion(CGF, S, Action);
5987   };
5988   llvm::Function *Fn;
5989   llvm::Constant *Addr;
5990   // Emit target region as a standalone region.
5991   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
5992       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
5993   assert(Fn && Addr && "Target device function emission failed.");
5994 }
5995 
5996 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
5997   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5998     emitTargetRegion(CGF, S, Action);
5999   };
6000   emitCommonOMPTargetDirective(*this, S, CodeGen);
6001 }
6002 
6003 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
6004                                         const OMPExecutableDirective &S,
6005                                         OpenMPDirectiveKind InnermostKind,
6006                                         const RegionCodeGenTy &CodeGen) {
6007   const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
6008   llvm::Function *OutlinedFn =
6009       CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
6010           S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
6011 
6012   const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
6013   const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
6014   if (NT || TL) {
6015     const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
6016     const Expr *ThreadLimit = TL ?
TL->getThreadLimit() : nullptr; 6017 6018 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 6019 S.getBeginLoc()); 6020 } 6021 6022 OMPTeamsScope Scope(CGF, S); 6023 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 6024 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 6025 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 6026 CapturedVars); 6027 } 6028 6029 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 6030 // Emit teams region as a standalone region. 6031 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6032 Action.Enter(CGF); 6033 OMPPrivateScope PrivateScope(CGF); 6034 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6035 CGF.EmitOMPPrivateClause(S, PrivateScope); 6036 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6037 (void)PrivateScope.Privatize(); 6038 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 6039 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6040 }; 6041 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6042 emitPostUpdateForReductionClause(*this, S, 6043 [](CodeGenFunction &) { return nullptr; }); 6044 } 6045 6046 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 6047 const OMPTargetTeamsDirective &S) { 6048 auto *CS = S.getCapturedStmt(OMPD_teams); 6049 Action.Enter(CGF); 6050 // Emit teams region as a standalone region. 6051 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6052 Action.Enter(CGF); 6053 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6054 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6055 CGF.EmitOMPPrivateClause(S, PrivateScope); 6056 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6057 (void)PrivateScope.Privatize(); 6058 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6059 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6060 CGF.EmitStmt(CS->getCapturedStmt()); 6061 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6062 }; 6063 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 6064 emitPostUpdateForReductionClause(CGF, S, 6065 [](CodeGenFunction &) { return nullptr; }); 6066 } 6067 6068 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 6069 CodeGenModule &CGM, StringRef ParentName, 6070 const OMPTargetTeamsDirective &S) { 6071 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6072 emitTargetTeamsRegion(CGF, Action, S); 6073 }; 6074 llvm::Function *Fn; 6075 llvm::Constant *Addr; 6076 // Emit target region as a standalone region. 6077 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6078 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6079 assert(Fn && Addr && "Target device function emission failed."); 6080 } 6081 6082 void CodeGenFunction::EmitOMPTargetTeamsDirective( 6083 const OMPTargetTeamsDirective &S) { 6084 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6085 emitTargetTeamsRegion(CGF, Action, S); 6086 }; 6087 emitCommonOMPTargetDirective(*this, S, CodeGen); 6088 } 6089 6090 static void 6091 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 6092 const OMPTargetTeamsDistributeDirective &S) { 6093 Action.Enter(CGF); 6094 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6095 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6096 }; 6097 6098 // Emit teams region as a standalone region. 
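  // Layered emission (descriptive note): the CodeGen lambda below emits the
  // 'teams' region, and CodeGenDistribute above is inlined inside it to emit
  // the 'distribute' loop.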
6099 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6100 PrePostActionTy &Action) { 6101 Action.Enter(CGF); 6102 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6103 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6104 (void)PrivateScope.Privatize(); 6105 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6106 CodeGenDistribute); 6107 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6108 }; 6109 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 6110 emitPostUpdateForReductionClause(CGF, S, 6111 [](CodeGenFunction &) { return nullptr; }); 6112 } 6113 6114 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 6115 CodeGenModule &CGM, StringRef ParentName, 6116 const OMPTargetTeamsDistributeDirective &S) { 6117 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6118 emitTargetTeamsDistributeRegion(CGF, Action, S); 6119 }; 6120 llvm::Function *Fn; 6121 llvm::Constant *Addr; 6122 // Emit target region as a standalone region. 6123 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6124 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6125 assert(Fn && Addr && "Target device function emission failed."); 6126 } 6127 6128 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 6129 const OMPTargetTeamsDistributeDirective &S) { 6130 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6131 emitTargetTeamsDistributeRegion(CGF, Action, S); 6132 }; 6133 emitCommonOMPTargetDirective(*this, S, CodeGen); 6134 } 6135 6136 static void emitTargetTeamsDistributeSimdRegion( 6137 CodeGenFunction &CGF, PrePostActionTy &Action, 6138 const OMPTargetTeamsDistributeSimdDirective &S) { 6139 Action.Enter(CGF); 6140 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6141 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6142 }; 6143 6144 // Emit teams region as a standalone region. 6145 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6146 PrePostActionTy &Action) { 6147 Action.Enter(CGF); 6148 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6149 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6150 (void)PrivateScope.Privatize(); 6151 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6152 CodeGenDistribute); 6153 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6154 }; 6155 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 6156 emitPostUpdateForReductionClause(CGF, S, 6157 [](CodeGenFunction &) { return nullptr; }); 6158 } 6159 6160 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 6161 CodeGenModule &CGM, StringRef ParentName, 6162 const OMPTargetTeamsDistributeSimdDirective &S) { 6163 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6164 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6165 }; 6166 llvm::Function *Fn; 6167 llvm::Constant *Addr; 6168 // Emit target region as a standalone region. 
6169 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6170 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6171 assert(Fn && Addr && "Target device function emission failed."); 6172 } 6173 6174 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 6175 const OMPTargetTeamsDistributeSimdDirective &S) { 6176 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6177 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6178 }; 6179 emitCommonOMPTargetDirective(*this, S, CodeGen); 6180 } 6181 6182 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 6183 const OMPTeamsDistributeDirective &S) { 6184 6185 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6186 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6187 }; 6188 6189 // Emit teams region as a standalone region. 6190 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6191 PrePostActionTy &Action) { 6192 Action.Enter(CGF); 6193 OMPPrivateScope PrivateScope(CGF); 6194 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6195 (void)PrivateScope.Privatize(); 6196 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6197 CodeGenDistribute); 6198 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6199 }; 6200 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6201 emitPostUpdateForReductionClause(*this, S, 6202 [](CodeGenFunction &) { return nullptr; }); 6203 } 6204 6205 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 6206 const OMPTeamsDistributeSimdDirective &S) { 6207 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6208 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6209 }; 6210 6211 // Emit teams region as a standalone region. 6212 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6213 PrePostActionTy &Action) { 6214 Action.Enter(CGF); 6215 OMPPrivateScope PrivateScope(CGF); 6216 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6217 (void)PrivateScope.Privatize(); 6218 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 6219 CodeGenDistribute); 6220 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6221 }; 6222 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 6223 emitPostUpdateForReductionClause(*this, S, 6224 [](CodeGenFunction &) { return nullptr; }); 6225 } 6226 6227 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 6228 const OMPTeamsDistributeParallelForDirective &S) { 6229 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6230 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6231 S.getDistInc()); 6232 }; 6233 6234 // Emit teams region as a standalone region. 
6235 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6236 PrePostActionTy &Action) { 6237 Action.Enter(CGF); 6238 OMPPrivateScope PrivateScope(CGF); 6239 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6240 (void)PrivateScope.Privatize(); 6241 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6242 CodeGenDistribute); 6243 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6244 }; 6245 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 6246 emitPostUpdateForReductionClause(*this, S, 6247 [](CodeGenFunction &) { return nullptr; }); 6248 } 6249 6250 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 6251 const OMPTeamsDistributeParallelForSimdDirective &S) { 6252 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6253 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6254 S.getDistInc()); 6255 }; 6256 6257 // Emit teams region as a standalone region. 6258 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6259 PrePostActionTy &Action) { 6260 Action.Enter(CGF); 6261 OMPPrivateScope PrivateScope(CGF); 6262 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6263 (void)PrivateScope.Privatize(); 6264 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6265 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6266 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6267 }; 6268 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 6269 CodeGen); 6270 emitPostUpdateForReductionClause(*this, S, 6271 [](CodeGenFunction &) { return nullptr; }); 6272 } 6273 6274 static void emitTargetTeamsDistributeParallelForRegion( 6275 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 6276 PrePostActionTy &Action) { 6277 Action.Enter(CGF); 6278 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6279 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6280 S.getDistInc()); 6281 }; 6282 6283 // Emit teams region as a standalone region. 6284 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6285 PrePostActionTy &Action) { 6286 Action.Enter(CGF); 6287 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6288 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6289 (void)PrivateScope.Privatize(); 6290 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6291 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6292 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6293 }; 6294 6295 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 6296 CodeGenTeams); 6297 emitPostUpdateForReductionClause(CGF, S, 6298 [](CodeGenFunction &) { return nullptr; }); 6299 } 6300 6301 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 6302 CodeGenModule &CGM, StringRef ParentName, 6303 const OMPTargetTeamsDistributeParallelForDirective &S) { 6304 // Emit SPMD target teams distribute parallel for region as a standalone 6305 // region. 6306 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6307 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6308 }; 6309 llvm::Function *Fn; 6310 llvm::Constant *Addr; 6311 // Emit target region as a standalone region. 
6312 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6313 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6314 assert(Fn && Addr && "Target device function emission failed."); 6315 } 6316 6317 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 6318 const OMPTargetTeamsDistributeParallelForDirective &S) { 6319 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6320 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6321 }; 6322 emitCommonOMPTargetDirective(*this, S, CodeGen); 6323 } 6324 6325 static void emitTargetTeamsDistributeParallelForSimdRegion( 6326 CodeGenFunction &CGF, 6327 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 6328 PrePostActionTy &Action) { 6329 Action.Enter(CGF); 6330 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6331 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6332 S.getDistInc()); 6333 }; 6334 6335 // Emit teams region as a standalone region. 6336 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6337 PrePostActionTy &Action) { 6338 Action.Enter(CGF); 6339 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6340 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6341 (void)PrivateScope.Privatize(); 6342 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6343 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6344 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6345 }; 6346 6347 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 6348 CodeGenTeams); 6349 emitPostUpdateForReductionClause(CGF, S, 6350 [](CodeGenFunction &) { return nullptr; }); 6351 } 6352 6353 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 6354 CodeGenModule &CGM, StringRef ParentName, 6355 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6356 // Emit SPMD target teams distribute parallel for simd region as a standalone 6357 // region. 6358 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6359 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6360 }; 6361 llvm::Function *Fn; 6362 llvm::Constant *Addr; 6363 // Emit target region as a standalone region. 
6364 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6365 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6366 assert(Fn && Addr && "Target device function emission failed."); 6367 } 6368 6369 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 6370 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6371 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6372 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6373 }; 6374 emitCommonOMPTargetDirective(*this, S, CodeGen); 6375 } 6376 6377 void CodeGenFunction::EmitOMPCancellationPointDirective( 6378 const OMPCancellationPointDirective &S) { 6379 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 6380 S.getCancelRegion()); 6381 } 6382 6383 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 6384 const Expr *IfCond = nullptr; 6385 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6386 if (C->getNameModifier() == OMPD_unknown || 6387 C->getNameModifier() == OMPD_cancel) { 6388 IfCond = C->getCondition(); 6389 break; 6390 } 6391 } 6392 if (CGM.getLangOpts().OpenMPIRBuilder) { 6393 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 6394 // TODO: This check is necessary as we only generate `omp parallel` through 6395 // the OpenMPIRBuilder for now. 6396 if (S.getCancelRegion() == OMPD_parallel || 6397 S.getCancelRegion() == OMPD_sections || 6398 S.getCancelRegion() == OMPD_section) { 6399 llvm::Value *IfCondition = nullptr; 6400 if (IfCond) 6401 IfCondition = EmitScalarExpr(IfCond, 6402 /*IgnoreResultAssign=*/true); 6403 return Builder.restoreIP( 6404 OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion())); 6405 } 6406 } 6407 6408 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond, 6409 S.getCancelRegion()); 6410 } 6411 6412 CodeGenFunction::JumpDest 6413 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 6414 if (Kind == OMPD_parallel || Kind == OMPD_task || 6415 Kind == OMPD_target_parallel || Kind == OMPD_taskloop || 6416 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop) 6417 return ReturnBlock; 6418 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections || 6419 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for || 6420 Kind == OMPD_distribute_parallel_for || 6421 Kind == OMPD_target_parallel_for || 6422 Kind == OMPD_teams_distribute_parallel_for || 6423 Kind == OMPD_target_teams_distribute_parallel_for); 6424 return OMPCancelStack.getExitBlock(); 6425 } 6426 6427 void CodeGenFunction::EmitOMPUseDevicePtrClause( 6428 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, 6429 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 6430 auto OrigVarIt = C.varlist_begin(); 6431 auto InitIt = C.inits().begin(); 6432 for (const Expr *PvtVarIt : C.private_copies()) { 6433 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl()); 6434 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl()); 6435 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl()); 6436 6437 // In order to identify the right initializer we need to match the 6438 // declaration used by the mapping logic. In some cases we may get 6439 // OMPCapturedExprDecl that refers to the original declaration. 
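    // E.g., for 'use_device_ptr(this->Ptr)' (illustrative only) the clause
    // variable is an OMPCapturedExprDecl initialized with the member
    // expression; the field declaration is what the mapping logic recorded.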
6440     const ValueDecl *MatchingVD = OrigVD;
6441     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
6442       // OMPCapturedExprDecls are used to privatize fields of the current
6443       // structure.
6444       const auto *ME = cast<MemberExpr>(OED->getInit());
6445       assert(isa<CXXThisExpr>(ME->getBase()) &&
6446              "Base should be the current struct!");
6447       MatchingVD = ME->getMemberDecl();
6448     }
6449 
6450     // If we don't have information about the current list item, move on to
6451     // the next one.
6452     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
6453     if (InitAddrIt == CaptureDeviceAddrMap.end())
6454       continue;
6455 
6456     bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
6457                                                          InitAddrIt, InitVD,
6458                                                          PvtVD]() {
6459       // Initialize the temporary initialization variable with the address we
6460       // get from the runtime library. We have to cast the source address
6461       // because it is always a void *. References are materialized in the
6462       // privatization scope, so the initialization here disregards the fact
6463       // the original variable is a reference.
6464       QualType AddrQTy =
6465           getContext().getPointerType(OrigVD->getType().getNonReferenceType());
6466       llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
6467       Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
6468       setAddrOfLocalVar(InitVD, InitAddr);
6469 
6470       // Emit the private declaration; it will be initialized by the
6471       // declaration we just added to the local declarations map.
6472       EmitDecl(*PvtVD);
6473 
6474       // The initialization variable has served its purpose in the emission
6475       // of the previous declaration, so we don't need it anymore.
6476       LocalDeclMap.erase(InitVD);
6477 
6478       // Return the address of the private variable.
6479       return GetAddrOfLocalVar(PvtVD);
6480     });
6481     assert(IsRegistered && "firstprivate var already registered as private");
6482     // Silence the warning about unused variable.
6483     (void)IsRegistered;
6484 
6485     ++OrigVarIt;
6486     ++InitIt;
6487   }
6488 }
6489 
6490 static const VarDecl *getBaseDecl(const Expr *Ref) {
6491   const Expr *Base = Ref->IgnoreParenImpCasts();
6492   while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
6493     Base = OASE->getBase()->IgnoreParenImpCasts();
6494   while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
6495     Base = ASE->getBase()->IgnoreParenImpCasts();
6496   return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
6497 }
6498 
6499 void CodeGenFunction::EmitOMPUseDeviceAddrClause(
6500     const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
6501     const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
6502   llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
6503   for (const Expr *Ref : C.varlists()) {
6504     const VarDecl *OrigVD = getBaseDecl(Ref);
6505     if (!Processed.insert(OrigVD).second)
6506       continue;
6507     // In order to identify the right initializer we need to match the
6508     // declaration used by the mapping logic. In some cases we may get an
6509     // OMPCapturedExprDecl that refers to the original declaration.
6510     const ValueDecl *MatchingVD = OrigVD;
6511     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
6512       // OMPCapturedExprDecls are used to privatize fields of the current
6513       // structure.
6514       const auto *ME = cast<MemberExpr>(OED->getInit());
6515       assert(isa<CXXThisExpr>(ME->getBase()) &&
6516              "Base should be the current struct!");
6517       MatchingVD = ME->getMemberDecl();
6518     }
6519 
6520     // If we don't have information about the current list item, move on to
6521     // the next one.
6522     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
6523     if (InitAddrIt == CaptureDeviceAddrMap.end())
6524       continue;
6525 
6526     Address PrivAddr = InitAddrIt->getSecond();
6527     // For declrefs and variable length arrays, load the pointer for correct
6528     // mapping, since the pointer to the data was passed to the runtime.
6529     if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
6530         MatchingVD->getType()->isArrayType())
6531       PrivAddr =
6532           EmitLoadOfPointer(PrivAddr, getContext()
6533                                           .getPointerType(OrigVD->getType())
6534                                           ->castAs<PointerType>());
6535     llvm::Type *RealTy =
6536         ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
6537             ->getPointerTo();
6538     PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
6539 
6540     (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
6541   }
6542 }
6543 
6544 // Generate the instructions for '#pragma omp target data' directive.
6545 void CodeGenFunction::EmitOMPTargetDataDirective(
6546     const OMPTargetDataDirective &S) {
6547   CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
6548                                        /*SeparateBeginEndCalls=*/true);
6549 
6550   // Create a pre/post action to signal the privatization of the device pointer.
6551   // This action can be replaced by the OpenMP runtime code generation to
6552   // deactivate privatization.
6553   bool PrivatizeDevicePointers = false;
6554   class DevicePointerPrivActionTy : public PrePostActionTy {
6555     bool &PrivatizeDevicePointers;
6556 
6557   public:
6558     explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
6559         : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
6560     void Enter(CodeGenFunction &CGF) override {
6561       PrivatizeDevicePointers = true;
6562     }
6563   };
6564   DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
6565 
6566   auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
6567                        CodeGenFunction &CGF, PrePostActionTy &Action) {
6568     auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6569       CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
6570     };
6571 
6572     // Codegen that selects whether to generate the privatization code or not.
6573     auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
6574                           &InnermostCodeGen](CodeGenFunction &CGF,
6575                                              PrePostActionTy &Action) {
6576       RegionCodeGenTy RCG(InnermostCodeGen);
6577       PrivatizeDevicePointers = false;
6578 
6579       // Call the pre-action to change the status of PrivatizeDevicePointers if
6580       // needed.
6581       Action.Enter(CGF);
6582 
6583       if (PrivatizeDevicePointers) {
6584         OMPPrivateScope PrivateScope(CGF);
6585         // Emit all instances of the use_device_ptr/use_device_addr clauses.
6586         for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
6587           CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
6588                                         Info.CaptureDeviceAddrMap);
6589         for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
6590           CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
6591                                          Info.CaptureDeviceAddrMap);
6592         (void)PrivateScope.Privatize();
6593         RCG(CGF);
6594       } else {
6595         OMPLexicalScope Scope(CGF, S, OMPD_unknown);
6596         RCG(CGF);
6597       }
6598     };
6599 
6600     // Forward the provided action to the privatization codegen.
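    // Descriptive note: the caller installs PrivAction on the outer region
    // codegen; it reaches PrivCodeGen through PrivRCG here, and entering it
    // sets PrivatizeDevicePointers before the branch above executes.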
6601     RegionCodeGenTy PrivRCG(PrivCodeGen);
6602     PrivRCG.setAction(Action);
6603 
6604     // Although the body of the region is emitted as an inlined directive, we
6605     // don't use an inline scope: changes to the references inside the region
6606     // are expected to be visible outside, so we do not privatize them.
6607     OMPLexicalScope Scope(CGF, S);
6608     CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
6609                                                     PrivRCG);
6610   };
6611 
6612   RegionCodeGenTy RCG(CodeGen);
6613 
6614   // If we don't have target devices, don't bother emitting the data mapping
6615   // code.
6616   if (CGM.getLangOpts().OMPTargetTriples.empty()) {
6617     RCG(*this);
6618     return;
6619   }
6620 
6621   // Check if we have any if clause associated with the directive.
6622   const Expr *IfCond = nullptr;
6623   if (const auto *C = S.getSingleClause<OMPIfClause>())
6624     IfCond = C->getCondition();
6625 
6626   // Check if we have any device clause associated with the directive.
6627   const Expr *Device = nullptr;
6628   if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6629     Device = C->getDevice();
6630 
6631   // Set the action to signal privatization of device pointers.
6632   RCG.setAction(PrivAction);
6633 
6634   // Emit region code.
6635   CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
6636                                              Info);
6637 }
6638 
6639 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
6640     const OMPTargetEnterDataDirective &S) {
6641   // If we don't have target devices, don't bother emitting the data mapping
6642   // code.
6643   if (CGM.getLangOpts().OMPTargetTriples.empty())
6644     return;
6645 
6646   // Check if we have any if clause associated with the directive.
6647   const Expr *IfCond = nullptr;
6648   if (const auto *C = S.getSingleClause<OMPIfClause>())
6649     IfCond = C->getCondition();
6650 
6651   // Check if we have any device clause associated with the directive.
6652   const Expr *Device = nullptr;
6653   if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6654     Device = C->getDevice();
6655 
6656   OMPLexicalScope Scope(*this, S, OMPD_task);
6657   CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
6658 }
6659 
6660 void CodeGenFunction::EmitOMPTargetExitDataDirective(
6661     const OMPTargetExitDataDirective &S) {
6662   // If we don't have target devices, don't bother emitting the data mapping
6663   // code.
6664   if (CGM.getLangOpts().OMPTargetTriples.empty())
6665     return;
6666 
6667   // Check if we have any if clause associated with the directive.
6668   const Expr *IfCond = nullptr;
6669   if (const auto *C = S.getSingleClause<OMPIfClause>())
6670     IfCond = C->getCondition();
6671 
6672   // Check if we have any device clause associated with the directive.
6673   const Expr *Device = nullptr;
6674   if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6675     Device = C->getDevice();
6676 
6677   OMPLexicalScope Scope(*this, S, OMPD_task);
6678   CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
6679 }
6680 
6681 static void emitTargetParallelRegion(CodeGenFunction &CGF,
6682                                      const OMPTargetParallelDirective &S,
6683                                      PrePostActionTy &Action) {
6684   // Get the captured statement associated with the 'parallel' region.
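  // Illustrative source form handled here:
  //   #pragma omp target parallel
  //   { /* body becomes the 'parallel' captured statement below */ }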
6685 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6686 Action.Enter(CGF); 6687 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6688 Action.Enter(CGF); 6689 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6690 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6691 CGF.EmitOMPPrivateClause(S, PrivateScope); 6692 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6693 (void)PrivateScope.Privatize(); 6694 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6695 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6696 // TODO: Add support for clauses. 6697 CGF.EmitStmt(CS->getCapturedStmt()); 6698 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6699 }; 6700 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6701 emitEmptyBoundParameters); 6702 emitPostUpdateForReductionClause(CGF, S, 6703 [](CodeGenFunction &) { return nullptr; }); 6704 } 6705 6706 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6707 CodeGenModule &CGM, StringRef ParentName, 6708 const OMPTargetParallelDirective &S) { 6709 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6710 emitTargetParallelRegion(CGF, S, Action); 6711 }; 6712 llvm::Function *Fn; 6713 llvm::Constant *Addr; 6714 // Emit target region as a standalone region. 6715 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6716 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6717 assert(Fn && Addr && "Target device function emission failed."); 6718 } 6719 6720 void CodeGenFunction::EmitOMPTargetParallelDirective( 6721 const OMPTargetParallelDirective &S) { 6722 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6723 emitTargetParallelRegion(CGF, S, Action); 6724 }; 6725 emitCommonOMPTargetDirective(*this, S, CodeGen); 6726 } 6727 6728 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 6729 const OMPTargetParallelForDirective &S, 6730 PrePostActionTy &Action) { 6731 Action.Enter(CGF); 6732 // Emit directive as a combined directive that consists of two implicit 6733 // directives: 'parallel' with 'for' directive. 6734 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6735 Action.Enter(CGF); 6736 CodeGenFunction::OMPCancelStackRAII CancelRegion( 6737 CGF, OMPD_target_parallel_for, S.hasCancel()); 6738 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6739 emitDispatchForLoopBounds); 6740 }; 6741 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 6742 emitEmptyBoundParameters); 6743 } 6744 6745 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 6746 CodeGenModule &CGM, StringRef ParentName, 6747 const OMPTargetParallelForDirective &S) { 6748 // Emit SPMD target parallel for region as a standalone region. 6749 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6750 emitTargetParallelForRegion(CGF, S, Action); 6751 }; 6752 llvm::Function *Fn; 6753 llvm::Constant *Addr; 6754 // Emit target region as a standalone region. 
6755   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6756       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6757   assert(Fn && Addr && "Target device function emission failed.");
6758 }
6759 
6760 void CodeGenFunction::EmitOMPTargetParallelForDirective(
6761     const OMPTargetParallelForDirective &S) {
6762   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6763     emitTargetParallelForRegion(CGF, S, Action);
6764   };
6765   emitCommonOMPTargetDirective(*this, S, CodeGen);
6766 }
6767 
6768 static void
6769 emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
6770                                 const OMPTargetParallelForSimdDirective &S,
6771                                 PrePostActionTy &Action) {
6772   Action.Enter(CGF);
6773   // Emit directive as a combined directive that consists of two implicit
6774   // directives: 'parallel' and 'for'.
6775   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6776     Action.Enter(CGF);
6777     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
6778                                emitDispatchForLoopBounds);
6779   };
6780   emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
6781                                  emitEmptyBoundParameters);
6782 }
6783 
6784 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
6785     CodeGenModule &CGM, StringRef ParentName,
6786     const OMPTargetParallelForSimdDirective &S) {
6787   // Emit SPMD target parallel for region as a standalone region.
6788   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6789     emitTargetParallelForSimdRegion(CGF, S, Action);
6790   };
6791   llvm::Function *Fn;
6792   llvm::Constant *Addr;
6793   // Emit target region as a standalone region.
6794   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6795       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6796   assert(Fn && Addr && "Target device function emission failed.");
6797 }
6798 
6799 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
6800     const OMPTargetParallelForSimdDirective &S) {
6801   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6802     emitTargetParallelForSimdRegion(CGF, S, Action);
6803   };
6804   emitCommonOMPTargetDirective(*this, S, CodeGen);
6805 }
6806 
6807 /// Map a loop helper variable to the address of its captured implicit parameter.
6808 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
6809                      const ImplicitParamDecl *PVD,
6810                      CodeGenFunction::OMPPrivateScope &Privates) {
6811   const auto *VDecl = cast<VarDecl>(Helper->getDecl());
6812   Privates.addPrivate(VDecl,
6813                       [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
6814 }
6815 
6816 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
6817   assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
6818   // Emit outlined function for task construct.
6819   const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
6820   Address CapturedStruct = Address::invalid();
6821   {
6822     OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
6823     CapturedStruct = GenerateCapturedStmtArgument(*CS);
6824   }
6825   QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
6826   const Expr *IfCond = nullptr;
6827   for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
6828     if (C->getNameModifier() == OMPD_unknown ||
6829         C->getNameModifier() == OMPD_taskloop) {
6830       IfCond = C->getCondition();
6831       break;
6832     }
6833   }
6834 
6835   OMPTaskDataTy Data;
6836   // Check if taskloop must be emitted without taskgroup.
6837   Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
6838   // TODO: Check if we should emit tied or untied task.
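  // Scheduling is derived below from the clauses (illustrative examples):
  //   #pragma omp taskloop grainsize(G) -> Schedule = {G, /*IntVal=*/false}
  //   #pragma omp taskloop num_tasks(N) -> Schedule = {N, /*IntVal=*/true}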
6839 Data.Tied = true; 6840 // Set scheduling for taskloop 6841 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) { 6842 // grainsize clause 6843 Data.Schedule.setInt(/*IntVal=*/false); 6844 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 6845 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) { 6846 // num_tasks clause 6847 Data.Schedule.setInt(/*IntVal=*/true); 6848 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 6849 } 6850 6851 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 6852 // if (PreCond) { 6853 // for (IV in 0..LastIteration) BODY; 6854 // <Final counter/linear vars updates>; 6855 // } 6856 // 6857 6858 // Emit: if (PreCond) - begin. 6859 // If the condition constant folds and can be elided, avoid emitting the 6860 // whole loop. 6861 bool CondConstant; 6862 llvm::BasicBlock *ContBlock = nullptr; 6863 OMPLoopScope PreInitScope(CGF, S); 6864 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 6865 if (!CondConstant) 6866 return; 6867 } else { 6868 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 6869 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 6870 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 6871 CGF.getProfileCount(&S)); 6872 CGF.EmitBlock(ThenBlock); 6873 CGF.incrementProfileCounter(&S); 6874 } 6875 6876 (void)CGF.EmitOMPLinearClauseInit(S); 6877 6878 OMPPrivateScope LoopScope(CGF); 6879 // Emit helper vars inits. 6880 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6881 auto *I = CS->getCapturedDecl()->param_begin(); 6882 auto *LBP = std::next(I, LowerBound); 6883 auto *UBP = std::next(I, UpperBound); 6884 auto *STP = std::next(I, Stride); 6885 auto *LIP = std::next(I, LastIter); 6886 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6887 LoopScope); 6888 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6889 LoopScope); 6890 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6891 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6892 LoopScope); 6893 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6894 CGF.EmitOMPLinearClause(S, LoopScope); 6895 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6896 (void)LoopScope.Privatize(); 6897 // Emit the loop iteration variable. 6898 const Expr *IVExpr = S.getIterationVariable(); 6899 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6900 CGF.EmitVarDecl(*IVDecl); 6901 CGF.EmitIgnoredExpr(S.getInit()); 6902 6903 // Emit the iterations count variable. 6904 // If it is not a variable, Sema decided to calculate iterations count on 6905 // each iteration (e.g., it is foldable into a constant). 6906 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6907 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6908 // Emit calculation of the iterations count. 
6909 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6910 } 6911 6912 { 6913 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6914 emitCommonSimdLoop( 6915 CGF, S, 6916 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6917 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6918 CGF.EmitOMPSimdInit(S); 6919 }, 6920 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6921 CGF.EmitOMPInnerLoop( 6922 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6923 [&S](CodeGenFunction &CGF) { 6924 emitOMPLoopBodyWithStopPoint(CGF, S, 6925 CodeGenFunction::JumpDest()); 6926 }, 6927 [](CodeGenFunction &) {}); 6928 }); 6929 } 6930 // Emit: if (PreCond) - end. 6931 if (ContBlock) { 6932 CGF.EmitBranch(ContBlock); 6933 CGF.EmitBlock(ContBlock, true); 6934 } 6935 // Emit final copy of the lastprivate variables if IsLastIter != 0. 6936 if (HasLastprivateClause) { 6937 CGF.EmitOMPLastprivateClauseFinal( 6938 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6939 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6940 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6941 (*LIP)->getType(), S.getBeginLoc()))); 6942 } 6943 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6944 return CGF.Builder.CreateIsNotNull( 6945 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6946 (*LIP)->getType(), S.getBeginLoc())); 6947 }); 6948 }; 6949 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6950 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6951 const OMPTaskDataTy &Data) { 6952 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6953 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6954 OMPLoopScope PreInitScope(CGF, S); 6955 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6956 OutlinedFn, SharedsTy, 6957 CapturedStruct, IfCond, Data); 6958 }; 6959 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6960 CodeGen); 6961 }; 6962 if (Data.Nogroup) { 6963 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6964 } else { 6965 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6966 *this, 6967 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6968 PrePostActionTy &Action) { 6969 Action.Enter(CGF); 6970 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6971 Data); 6972 }, 6973 S.getBeginLoc()); 6974 } 6975 } 6976 6977 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6978 auto LPCRegion = 6979 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6980 EmitOMPTaskLoopBasedDirective(S); 6981 } 6982 6983 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6984 const OMPTaskLoopSimdDirective &S) { 6985 auto LPCRegion = 6986 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6987 OMPLexicalScope Scope(*this, S); 6988 EmitOMPTaskLoopBasedDirective(S); 6989 } 6990 6991 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6992 const OMPMasterTaskLoopDirective &S) { 6993 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6994 Action.Enter(CGF); 6995 EmitOMPTaskLoopBasedDirective(S); 6996 }; 6997 auto LPCRegion = 6998 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6999 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 7000 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 7001 } 7002 7003 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 7004 const OMPMasterTaskLoopSimdDirective &S) { 7005 auto &&CodeGen = [this, 
&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7006 Action.Enter(CGF); 7007 EmitOMPTaskLoopBasedDirective(S); 7008 }; 7009 auto LPCRegion = 7010 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7011 OMPLexicalScope Scope(*this, S); 7012 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 7013 } 7014 7015 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 7016 const OMPParallelMasterTaskLoopDirective &S) { 7017 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7018 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 7019 PrePostActionTy &Action) { 7020 Action.Enter(CGF); 7021 CGF.EmitOMPTaskLoopBasedDirective(S); 7022 }; 7023 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 7024 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 7025 S.getBeginLoc()); 7026 }; 7027 auto LPCRegion = 7028 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7029 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 7030 emitEmptyBoundParameters); 7031 } 7032 7033 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 7034 const OMPParallelMasterTaskLoopSimdDirective &S) { 7035 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 7036 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 7037 PrePostActionTy &Action) { 7038 Action.Enter(CGF); 7039 CGF.EmitOMPTaskLoopBasedDirective(S); 7040 }; 7041 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 7042 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 7043 S.getBeginLoc()); 7044 }; 7045 auto LPCRegion = 7046 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 7047 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 7048 emitEmptyBoundParameters); 7049 } 7050 7051 // Generate the instructions for '#pragma omp target update' directive. 7052 void CodeGenFunction::EmitOMPTargetUpdateDirective( 7053 const OMPTargetUpdateDirective &S) { 7054 // If we don't have target devices, don't bother emitting the data mapping 7055 // code. 7056 if (CGM.getLangOpts().OMPTargetTriples.empty()) 7057 return; 7058 7059 // Check if we have any if clause associated with the directive. 7060 const Expr *IfCond = nullptr; 7061 if (const auto *C = S.getSingleClause<OMPIfClause>()) 7062 IfCond = C->getCondition(); 7063 7064 // Check if we have any device clause associated with the directive. 7065 const Expr *Device = nullptr; 7066 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 7067 Device = C->getDevice(); 7068 7069 OMPLexicalScope Scope(*this, S, OMPD_task); 7070 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 7071 } 7072 7073 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 7074 const OMPExecutableDirective &D) { 7075 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 7076 EmitOMPScanDirective(*SD); 7077 return; 7078 } 7079 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 7080 return; 7081 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 7082 OMPPrivateScope GlobalsScope(CGF); 7083 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 7084 // Capture global firstprivates to avoid crash. 
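      // E.g., 'firstprivate(Global)' on a task-generating directive
      // (illustrative only): the global gets a local mapping here so the
      // outlined task body does not reference the global directly.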
7085       for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
7086         for (const Expr *Ref : C->varlists()) {
7087           const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
7088           if (!DRE)
7089             continue;
7090           const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
7091           if (!VD || VD->hasLocalStorage())
7092             continue;
7093           if (!CGF.LocalDeclMap.count(VD)) {
7094             LValue GlobLVal = CGF.EmitLValue(Ref);
7095             GlobalsScope.addPrivate(
7096                 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
7097           }
7098         }
7099       }
7100     }
7101     if (isOpenMPSimdDirective(D.getDirectiveKind())) {
7102       (void)GlobalsScope.Privatize();
7103       ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
7104       emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
7105     } else {
7106       if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
7107         for (const Expr *E : LD->counters()) {
7108           const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
7109           if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
7110             LValue GlobLVal = CGF.EmitLValue(E);
7111             GlobalsScope.addPrivate(
7112                 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
7113           }
7114           if (isa<OMPCapturedExprDecl>(VD)) {
7115             // Emit only those that were not explicitly referenced in clauses.
7116             if (!CGF.LocalDeclMap.count(VD))
7117               CGF.EmitVarDecl(*VD);
7118           }
7119         }
7120         for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
7121           if (!C->getNumForLoops())
7122             continue;
7123           for (unsigned I = LD->getLoopsNumber(),
7124                         E = C->getLoopNumIterations().size();
7125                I < E; ++I) {
7126             if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
7127                     cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
7128               // Emit only those that were not explicitly referenced in clauses.
7129               if (!CGF.LocalDeclMap.count(VD))
7130                 CGF.EmitVarDecl(*VD);
7131             }
7132           }
7133         }
7134       }
7135       (void)GlobalsScope.Privatize();
7136       CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
7137     }
7138   };
7139   if (D.getDirectiveKind() == OMPD_atomic ||
7140       D.getDirectiveKind() == OMPD_critical ||
7141       D.getDirectiveKind() == OMPD_section ||
7142       D.getDirectiveKind() == OMPD_master ||
7143       D.getDirectiveKind() == OMPD_masked) {
7144     EmitStmt(D.getAssociatedStmt());
7145   } else {
7146     auto LPCRegion =
7147         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
7148     OMPSimdLexicalScope Scope(*this, D);
7149     CGM.getOpenMPRuntime().emitInlinedDirective(
7150         *this,
7151         isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
7152                                                     : D.getDirectiveKind(),
7153         CodeGen);
7154   }
7155   // Check for outer lastprivate conditional update.
7156   checkForLastprivateConditionalUpdate(*this, D);
7157 }
7158 