//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct
/// codegen for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) ||
                (CGF.CapturedStmtInfo &&
                 InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(OrigVDTy))),
                        CGF.ConvertTypeForMem(OrigVDTy),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
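      // For example, 'for (int X : Vec)' desugars to a loop over hidden
      // '__range' and '__end' variables; those declarations (and the loop
      // init statement) are emitted here so that later bounds computation
      // can refer to them.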
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted as they are
    // not used in simd-only mode.
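    // E.g. for '#pragma omp simd reduction(inscan, +: X)' the copy-array
    // temporaries exist only for the full runtime lowering of the scan, so
    // they are collected below and excluded from capture privatization.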
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*UsesFPIntrin=*/false, /*isInlineSpecified=*/false,
        /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit the function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Always inline the outlined function if optimizations are enabled.
  if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
    F->removeFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit the function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
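    // E.g. an 'int *P' captured by copy needs no uintptr cast: the argument
    // already holds the pointer value, so its local slot is recorded as-is.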
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
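  // When full debug info is requested, emit a "_debug__"-suffixed variant of
  // the outlined function that keeps the original parameter types, plus a
  // wrapper under the helper name that performs the uintptr casts and calls
  // it (see the wrapper emission at the end of this function).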
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  // Build the argument list.
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first,
                            LocalAddrPair.second.second);
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace()),
            PI->getType()));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV =
            WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                      Arg->getType(), AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
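  // E.g. for 'S Arr[2][3]' this yields NumElements = 6 with ElementTy = S,
  // so the copy loop below runs once per innermost element.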
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd =
      Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI, SrcAddr.getElementType(),
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext =
      Builder.CreateConstGEP1_32(DestAddr.getElementType(), DestElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext =
      Builder.CreateConstGEP1_32(SrcAddr.getElementType(), SrcElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with a single array element, so we have to remap the
            // destination and source variables to the corresponding array
            // elements.
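            // The scope below makes 'Copy', which names DestVD and SrcVD,
            // read from and write to the current pair of elements.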
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, DestElement);
            Remap.addPrivate(SrcVD, SrcElement);
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, SrcAddr);
    Remap.addPrivate(DestVD, DestAddr);
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function, e.g. 'omp for', 'omp simd', 'omp distribute', etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copies for firstprivate constant variables in target
      // regions that are captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
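            // E.g. a 'const int' with a known initializer folds to a
            // constant here, so no private allocation is required.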
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
          const Expr *Init = VD->getInit();
          if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
            EmitAggregateAssign(Dest, OriginalLVal, Type);
          } else {
            EmitOMPAggregateAssign(
                Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
                Type,
                [this, VDInit, Init](Address DestElement, Address SrcElement) {
                  // Clean up any temporaries needed by the
                  // initialization.
                  RunCleanupsScope InitScope(*this);
                  // Emit initialization for single element.
                  setAddrOfLocalVar(VDInit, SrcElement);
                  EmitAnyExprToMem(Init, DestElement,
                                   Init->getType().getQualifiers(),
                                   /*IsInitializer*/ false);
                  LocalDeclMap.erase(VDInit);
                });
          }
          EmitAutoVarCleanups(Emission);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          // Emit private VarDecl with copy init.
          // Remap temp VDInit variable to the address of the original
          // variable (for proper handling of captured global variables).
          setAddrOfLocalVar(VDInit, OriginalAddr);
          EmitDecl(*VD);
          LocalDeclMap.erase(VDInit);
          Address VDAddr = GetAddrOfLocalVar(VD);
          if (ThisFirstprivateIsLastprivate &&
              Lastprivates[OrigVD->getCanonicalDecl()] ==
                  OMPC_LASTPRIVATE_conditional) {
            // Create/init special variable for lastprivate conditionals.
            llvm::Value *V =
                EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl),
                                 (*IRef)->getExprLoc());
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl));
            LocalDeclMap.erase(VD);
            setAddrOfLocalVar(VD, VDAddr);
          }
          IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        // Emit the private VarDecl with its default init.
        EmitDecl(*VD);
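        // E.g. for '#pragma omp parallel private(A)', the fresh local just
        // emitted is registered below so that uses of 'A' in the region map
        // to it once the scope is privatized.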
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      CGM.getTypes().ConvertTypeForMem(VD->getType()),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First, check whether the current thread is the master thread. If
          // it is, there is no need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          // TODO: Avoid ptrtoint conversion.
          auto *MasterAddrInt =
              Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
          auto *PrivateAddrInt =
              Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
          Builder.CreateCondBr(
              Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
              CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of the copying procedure for the non-master thread.
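    // (The master thread branches directly here, since its threadprivate
    // copy and the master copy share the same address.)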
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          Address VDAddr = Address::invalid();
          if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            setAddrOfLocalVar(VD, VDAddr);
          } else {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            VDAddr = GetAddrOfLocalVar(VD);
          }
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit an implicit barrier if at least one lastprivate conditional is
    // found and this is not simd mode.
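    // The barrier ensures every thread's conditional updates are complete
    // before the final values are copied back below.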
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr = Address(
              Builder.CreateLoad(PrivateAddr),
              CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
              CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count).getAddress(*this),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered =
        PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, Builder.CreateElementBitCast(
                                         GetAddrOfLocalVar(PrivateVD),
                                         ConvertTypeForMem(RHSVD->getType()),
                                         "rhs.begin"));
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, OriginalAddr);
      PrivateScope.addPrivate(
          RHSVD, IsArray ? Builder.CreateElementBitCast(
                               GetAddrOfLocalVar(PrivateVD),
                               ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                         : GetAddrOfLocalVar(PrivateVD));
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
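    // E.g. '#pragma omp for nowait reduction(+ : Sum)' takes the nowait
    // path, as does any 'parallel' form, whose region-ending barrier
    // already provides the needed synchronization.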
1437 CGM.getOpenMPRuntime().emitReduction(
1438 *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
1439 {WithNowait, SimpleReduction, ReductionKind});
1440 }
1441 }
1442
1443 static void emitPostUpdateForReductionClause(
1444 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1445 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1446 if (!CGF.HaveInsertPoint())
1447 return;
1448 llvm::BasicBlock *DoneBB = nullptr;
1449 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1450 if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
1451 if (!DoneBB) {
1452 if (llvm::Value *Cond = CondGen(CGF)) {
1453 // When the first post-update expression is found, emit the conditional
1454 // block if one was requested.
1455 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1456 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1457 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1458 CGF.EmitBlock(ThenBB);
1459 }
1460 }
1461 CGF.EmitIgnoredExpr(PostUpdate);
1462 }
1463 }
1464 if (DoneBB)
1465 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
1466 }
1467
1468 namespace {
1469 /// Codegen lambda for appending distribute lower and upper bounds to outlined
1470 /// parallel function. This is necessary for combined constructs such as
1471 /// 'distribute parallel for'.
1472 typedef llvm::function_ref<void(CodeGenFunction &,
1473 const OMPExecutableDirective &,
1474 llvm::SmallVectorImpl<llvm::Value *> &)>
1475 CodeGenBoundParametersTy;
1476 } // anonymous namespace
1477
1478 static void
1479 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
1480 const OMPExecutableDirective &S) {
1481 if (CGF.getLangOpts().OpenMP < 50)
1482 return;
1483 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
1484 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1485 for (const Expr *Ref : C->varlists()) {
1486 if (!Ref->getType()->isScalarType())
1487 continue;
1488 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1489 if (!DRE)
1490 continue;
1491 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1492 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1493 }
1494 }
1495 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1496 for (const Expr *Ref : C->varlists()) {
1497 if (!Ref->getType()->isScalarType())
1498 continue;
1499 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1500 if (!DRE)
1501 continue;
1502 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1503 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1504 }
1505 }
1506 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1507 for (const Expr *Ref : C->varlists()) {
1508 if (!Ref->getType()->isScalarType())
1509 continue;
1510 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1511 if (!DRE)
1512 continue;
1513 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1514 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1515 }
1516 }
1517 // Privates should not be analyzed since they are not captured at all.
1518 // Task reductions may be skipped - tasks are ignored.
1519 // Firstprivates do not return a value but may be passed by reference - no need
1520 // to check for updated lastprivate conditional.
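// Illustrative example (hypothetical code, OpenMP 5.0 semantics assumed):
//   #pragma omp parallel for lastprivate(conditional: x)
//   for (int i = 0; i < n; ++i)
//     if (a[i] != 0) x = i;
// Writes to 'x' must be tracked so that the value from the sequentially
// last iteration that actually assigns it is the one copied back; the
// clause walks above and below register the declarations needing this
// analysis.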
1521 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1522 for (const Expr *Ref : C->varlists()) { 1523 if (!Ref->getType()->isScalarType()) 1524 continue; 1525 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1526 if (!DRE) 1527 continue; 1528 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1529 } 1530 } 1531 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1532 CGF, S, PrivateDecls); 1533 } 1534 1535 static void emitCommonOMPParallelDirective( 1536 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1537 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1538 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1539 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1540 llvm::Value *NumThreads = nullptr; 1541 llvm::Function *OutlinedFn = 1542 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1543 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1544 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1545 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1546 NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1547 /*IgnoreResultAssign=*/true); 1548 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1549 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1550 } 1551 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1552 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1553 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1554 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1555 } 1556 const Expr *IfCond = nullptr; 1557 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1558 if (C->getNameModifier() == OMPD_unknown || 1559 C->getNameModifier() == OMPD_parallel) { 1560 IfCond = C->getCondition(); 1561 break; 1562 } 1563 } 1564 1565 OMPParallelScope Scope(CGF, S); 1566 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1567 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk 1568 // lower and upper bounds with the pragma 'for' chunking mechanism. 1569 // The following lambda takes care of appending the lower and upper bound 1570 // parameters when necessary 1571 CodeGenBoundParameters(CGF, S, CapturedVars); 1572 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1573 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1574 CapturedVars, IfCond, NumThreads); 1575 } 1576 1577 static bool isAllocatableDecl(const VarDecl *VD) { 1578 const VarDecl *CVD = VD->getCanonicalDecl(); 1579 if (!CVD->hasAttr<OMPAllocateDeclAttr>()) 1580 return false; 1581 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>(); 1582 // Use the default allocation. 
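// Illustrative example (hypothetical declaration): only something like
//   int buf[64];
//   #pragma omp allocate(buf) allocator(omp_high_bw_mem_alloc)
// is treated as allocatable here; with the default (or null) allocator and
// no allocator expression, the default allocation is used and the check
// below returns false.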
1583 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
1584 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
1585 !AA->getAllocator());
1586 }
1587
1588 static void emitEmptyBoundParameters(CodeGenFunction &,
1589 const OMPExecutableDirective &,
1590 llvm::SmallVectorImpl<llvm::Value *> &) {}
1591
1592 Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
1593 CodeGenFunction &CGF, const VarDecl *VD) {
1594 CodeGenModule &CGM = CGF.CGM;
1595 auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1596
1597 if (!VD)
1598 return Address::invalid();
1599 const VarDecl *CVD = VD->getCanonicalDecl();
1600 if (!isAllocatableDecl(CVD))
1601 return Address::invalid();
1602 llvm::Value *Size;
1603 CharUnits Align = CGM.getContext().getDeclAlign(CVD);
1604 if (CVD->getType()->isVariablyModifiedType()) {
1605 Size = CGF.getTypeSize(CVD->getType());
1606 // Align the size: ((size + align - 1) / align) * align
1607 Size = CGF.Builder.CreateNUWAdd(
1608 Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
1609 Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
1610 Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
1611 } else {
1612 CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
1613 Size = CGM.getSize(Sz.alignTo(Align));
1614 }
1615
1616 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1617 assert(AA->getAllocator() &&
1618 "Expected allocator expression for non-default allocator.");
1619 llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
1620 // According to the standard, the original allocator type is an enum (integer).
1621 // Convert to pointer type, if required.
1622 if (Allocator->getType()->isIntegerTy())
1623 Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
1624 else if (Allocator->getType()->isPointerTy())
1625 Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
1626 CGM.VoidPtrTy);
1627
1628 llvm::Value *Addr = OMPBuilder.createOMPAlloc(
1629 CGF.Builder, Size, Allocator,
1630 getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
1631 llvm::CallInst *FreeCI =
1632 OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);
1633
1634 CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
1635 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1636 Addr,
1637 CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
1638 getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
1639 return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
1640 }
1641
1642 Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
1643 CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
1644 SourceLocation Loc) {
1645 CodeGenModule &CGM = CGF.CGM;
1646 if (CGM.getLangOpts().OpenMPUseTLS &&
1647 CGM.getContext().getTargetInfo().isTLSSupported())
1648 return VDAddr;
1649
1650 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1651
1652 llvm::Type *VarTy = VDAddr.getElementType();
1653 llvm::Value *Data =
1654 CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
1655 llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
1656 std::string Suffix = getNameWithSeparators({"cache", ""});
1657 llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
1658
1659 llvm::CallInst *ThreadPrivateCacheCall =
1660 OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
1661
1662 return
Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
1663 }
1664
1665 std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
1666 ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
1667 SmallString<128> Buffer;
1668 llvm::raw_svector_ostream OS(Buffer);
1669 StringRef Sep = FirstSeparator;
1670 for (StringRef Part : Parts) {
1671 OS << Sep << Part;
1672 Sep = Separator;
1673 }
1674 return OS.str().str();
1675 }
1676 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1677 if (CGM.getLangOpts().OpenMPIRBuilder) {
1678 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1679 // Check if we have any if clause associated with the directive.
1680 llvm::Value *IfCond = nullptr;
1681 if (const auto *C = S.getSingleClause<OMPIfClause>())
1682 IfCond = EmitScalarExpr(C->getCondition(),
1683 /*IgnoreResultAssign=*/true);
1684
1685 llvm::Value *NumThreads = nullptr;
1686 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1687 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1688 /*IgnoreResultAssign=*/true);
1689
1690 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1691 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1692 ProcBind = ProcBindClause->getProcBindKind();
1693
1694 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1695
1696 // The cleanup callback that finalizes all variables at the given location,
1697 // i.e., it calls destructors etc.
1698 auto FiniCB = [this](InsertPointTy IP) {
1699 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1700 };
1701
1702 // Privatization callback that performs appropriate action for
1703 // shared/private/firstprivate/lastprivate/copyin/... variables.
1704 //
1705 // TODO: This defaults to shared right now.
1706 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1707 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
1708 // The next line is appropriate only for variables (Val) with the
1709 // data-sharing attribute "shared".
1710 ReplVal = &Val;
1711
1712 return CodeGenIP;
1713 };
1714
1715 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1716 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1717
1718 auto BodyGenCB = [ParallelRegionBodyStmt,
1719 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1720 llvm::BasicBlock &ContinuationBB) {
1721 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
1722 ContinuationBB);
1723 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
1724 CodeGenIP, ContinuationBB);
1725 };
1726
1727 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1728 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1729 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
1730 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
1731 Builder.restoreIP(
1732 OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
1733 IfCond, NumThreads, ProcBind, S.hasCancel()));
1734 return;
1735 }
1736
1737 // Emit parallel region as a standalone region.
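// Illustrative sketch (names assumed, not taken from this file): for
//   #pragma omp parallel firstprivate(a) reduction(+ : sum)
// the lambda below privatizes 'a' and 'sum' and emits the region body;
// emitCommonOMPParallelDirective then outlines it and invokes it through
// the runtime (conceptually a __kmpc_fork_call).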
1738 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1739 Action.Enter(CGF);
1740 OMPPrivateScope PrivateScope(CGF);
1741 bool Copyins = CGF.EmitOMPCopyinClause(S);
1742 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1743 if (Copyins) {
1744 // Emit an implicit barrier to synchronize threads and avoid data races
1745 // when propagating the master thread's values of threadprivate variables
1746 // to the local instances of those variables in all other implicit threads.
1747 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1748 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1749 /*ForceSimpleCall=*/true);
1750 }
1751 CGF.EmitOMPPrivateClause(S, PrivateScope);
1752 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1753 (void)PrivateScope.Privatize();
1754 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1755 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1756 };
1757 {
1758 auto LPCRegion =
1759 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1760 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1761 emitEmptyBoundParameters);
1762 emitPostUpdateForReductionClause(*this, S,
1763 [](CodeGenFunction &) { return nullptr; });
1764 }
1765 // Check for outer lastprivate conditional update.
1766 checkForLastprivateConditionalUpdate(*this, S);
1767 }
1768
1769 void CodeGenFunction::EmitOMPMetaDirective(const OMPMetaDirective &S) {
1770 EmitStmt(S.getIfStmt());
1771 }
1772
1773 namespace {
1774 /// RAII to handle scopes for loop transformation directives.
1775 class OMPTransformDirectiveScopeRAII {
1776 OMPLoopScope *Scope = nullptr;
1777 CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
1778 CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
1779
1780 public:
1781 OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
1782 if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
1783 Scope = new OMPLoopScope(CGF, *Dir);
1784 CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
1785 CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
1786 }
1787 }
1788 ~OMPTransformDirectiveScopeRAII() {
1789 if (!Scope)
1790 return;
1791 delete CapInfoRAII;
1792 delete CGSI;
1793 delete Scope;
1794 }
1795 };
1796 } // namespace
1797
1798 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1799 int MaxLevel, int Level = 0) {
1800 assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1801 const Stmt *SimplifiedS = S->IgnoreContainers();
1802 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1803 PrettyStackTraceLoc CrashInfo(
1804 CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1805 "LLVM IR generation of compound statement ('{}')");
1806
1807 // Keep track of the current cleanup stack depth, including debug scopes.
1808 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1809 for (const Stmt *CurStmt : CS->body()) 1810 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1811 return; 1812 } 1813 if (SimplifiedS == NextLoop) { 1814 if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS)) 1815 SimplifiedS = Dir->getTransformedStmt(); 1816 if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS)) 1817 SimplifiedS = CanonLoop->getLoopStmt(); 1818 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1819 S = For->getBody(); 1820 } else { 1821 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1822 "Expected canonical for loop or range-based for loop."); 1823 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1824 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1825 S = CXXFor->getBody(); 1826 } 1827 if (Level + 1 < MaxLevel) { 1828 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1829 S, /*TryImperfectlyNestedLoops=*/true); 1830 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1831 return; 1832 } 1833 } 1834 CGF.EmitStmt(S); 1835 } 1836 1837 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1838 JumpDest LoopExit) { 1839 RunCleanupsScope BodyScope(*this); 1840 // Update counters values on current iteration. 1841 for (const Expr *UE : D.updates()) 1842 EmitIgnoredExpr(UE); 1843 // Update the linear variables. 1844 // In distribute directives only loop counters may be marked as linear, no 1845 // need to generate the code for them. 1846 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1847 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1848 for (const Expr *UE : C->updates()) 1849 EmitIgnoredExpr(UE); 1850 } 1851 } 1852 1853 // On a continue in the body, jump to the end. 1854 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1855 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1856 for (const Expr *E : D.finals_conditions()) { 1857 if (!E) 1858 continue; 1859 // Check that loop counter in non-rectangular nest fits into the iteration 1860 // space. 1861 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1862 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1863 getProfileCount(D.getBody())); 1864 EmitBlock(NextBB); 1865 } 1866 1867 OMPPrivateScope InscanScope(*this); 1868 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true); 1869 bool IsInscanRegion = InscanScope.Privatize(); 1870 if (IsInscanRegion) { 1871 // Need to remember the block before and after scan directive 1872 // to dispatch them correctly depending on the clause used in 1873 // this directive, inclusive or exclusive. For inclusive scan the natural 1874 // order of the blocks is used, for exclusive clause the blocks must be 1875 // executed in reverse order. 1876 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb"); 1877 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb"); 1878 // No need to allocate inscan exit block, in simd mode it is selected in the 1879 // codegen for the scan directive. 1880 if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd) 1881 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1882 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1883 EmitBranch(OMPScanDispatch); 1884 EmitBlock(OMPBeforeScanBlock); 1885 } 1886 1887 // Emit loop variables for C++ range loops. 1888 const Stmt *Body = 1889 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1890 // Emit loop body. 
1891 emitBody(*this, Body,
1892 OMPLoopBasedDirective::tryToFindNextInnerLoop(
1893 Body, /*TryImperfectlyNestedLoops=*/true),
1894 D.getLoopsNumber());
1895
1896 // Jump to the dispatcher at the end of the loop body.
1897 if (IsInscanRegion)
1898 EmitBranch(OMPScanExitBlock);
1899
1900 // The end (updates/cleanups).
1901 EmitBlock(Continue.getBlock());
1902 BreakContinueStack.pop_back();
1903 }
1904
1905 using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;
1906
1907 /// Emit a captured statement and return the function as well as its captured
1908 /// closure context.
1909 static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
1910 const CapturedStmt *S) {
1911 LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
1912 CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
1913 std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
1914 std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
1915 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
1916 llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);
1917
1918 return {F, CapStruct.getPointer(ParentCGF)};
1919 }
1920
1921 /// Emit a call to a previously captured closure.
1922 static llvm::CallInst *
1923 emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
1924 llvm::ArrayRef<llvm::Value *> Args) {
1925 // Append the closure context to the argument list.
1926 SmallVector<llvm::Value *> EffectiveArgs;
1927 EffectiveArgs.reserve(Args.size() + 1);
1928 llvm::append_range(EffectiveArgs, Args);
1929 EffectiveArgs.push_back(Cap.second);
1930
1931 return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
1932 }
1933
1934 llvm::CanonicalLoopInfo *
1935 CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
1936 assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
1937
1938 // The caller is processing the loop-associated directive containing the \p
1939 // Depth loops nested in \p S. Put the previous pending loop-associated
1940 // directive on the stack. If the current loop-associated directive is a loop
1941 // transformation directive, it will push its generated loops onto the stack
1942 // such that together with the loops left here they form the combined loop
1943 // nest for the parent loop-associated directive.
1944 int ParentExpectedOMPLoopDepth = ExpectedOMPLoopDepth;
1945 ExpectedOMPLoopDepth = Depth;
1946
1947 EmitStmt(S);
1948 assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");
1949
1950 // The last added loop is the outermost one.
1951 llvm::CanonicalLoopInfo *Result = OMPLoopNestStack.back();
1952
1953 // Pop the \p Depth loops requested by the call from that stack and restore
1954 // the previous context.
1955 OMPLoopNestStack.pop_back_n(Depth);
1956 ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
1957
1958 return Result;
1959 }
1960
1961 void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
1962 const Stmt *SyntacticalLoop = S->getLoopStmt();
1963 if (!getLangOpts().OpenMPIRBuilder) {
1964 // Ignore the canonical loop wrapper if the OpenMPIRBuilder is not enabled.
1965 EmitStmt(SyntacticalLoop);
1966 return;
1967 }
1968
1969 LexicalScope ForScope(*this, S->getSourceRange());
1970
1971 // Emit init statements. The Distance/LoopVar funcs may reference variable
1972 // declarations they contain.
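// Illustrative sketch (assuming the usual lowering): for a loop such as
//   for (int i = 7; i < N; i += 3) body(i);
// the distance function stores the trip count, roughly
//   *count = (N > 7) ? (N - 7 + 3 - 1) / 3 : 0;
// and the loop-variable function rebuilds 'i' from the logical iteration
// number: i = 7 + logical * 3.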
1973 const Stmt *BodyStmt; 1974 if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) { 1975 if (const Stmt *InitStmt = For->getInit()) 1976 EmitStmt(InitStmt); 1977 BodyStmt = For->getBody(); 1978 } else if (const auto *RangeFor = 1979 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) { 1980 if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt()) 1981 EmitStmt(RangeStmt); 1982 if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt()) 1983 EmitStmt(BeginStmt); 1984 if (const DeclStmt *EndStmt = RangeFor->getEndStmt()) 1985 EmitStmt(EndStmt); 1986 if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt()) 1987 EmitStmt(LoopVarStmt); 1988 BodyStmt = RangeFor->getBody(); 1989 } else 1990 llvm_unreachable("Expected for-stmt or range-based for-stmt"); 1991 1992 // Emit closure for later use. By-value captures will be captured here. 1993 const CapturedStmt *DistanceFunc = S->getDistanceFunc(); 1994 EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc); 1995 const CapturedStmt *LoopVarFunc = S->getLoopVarFunc(); 1996 EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc); 1997 1998 // Call the distance function to get the number of iterations of the loop to 1999 // come. 2000 QualType LogicalTy = DistanceFunc->getCapturedDecl() 2001 ->getParam(0) 2002 ->getType() 2003 .getNonReferenceType(); 2004 Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr"); 2005 emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()}); 2006 llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count"); 2007 2008 // Emit the loop structure. 2009 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 2010 auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP, 2011 llvm::Value *IndVar) { 2012 Builder.restoreIP(CodeGenIP); 2013 2014 // Emit the loop body: Convert the logical iteration number to the loop 2015 // variable and emit the body. 2016 const DeclRefExpr *LoopVarRef = S->getLoopVarRef(); 2017 LValue LCVal = EmitLValue(LoopVarRef); 2018 Address LoopVarAddress = LCVal.getAddress(*this); 2019 emitCapturedStmtCall(*this, LoopVarClosure, 2020 {LoopVarAddress.getPointer(), IndVar}); 2021 2022 RunCleanupsScope BodyScope(*this); 2023 EmitStmt(BodyStmt); 2024 }; 2025 llvm::CanonicalLoopInfo *CL = 2026 OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal); 2027 2028 // Finish up the loop. 2029 Builder.restoreIP(CL->getAfterIP()); 2030 ForScope.ForceCleanup(); 2031 2032 // Remember the CanonicalLoopInfo for parent AST nodes consuming it. 2033 OMPLoopNestStack.push_back(CL); 2034 } 2035 2036 void CodeGenFunction::EmitOMPInnerLoop( 2037 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 2038 const Expr *IncExpr, 2039 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 2040 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 2041 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 2042 2043 // Start the loop with a block that tests the condition. 2044 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 2045 EmitBlock(CondBlock); 2046 const SourceRange R = S.getSourceRange(); 2047 2048 // If attributes are attached, push to the basic block with them. 
2049 const auto &OMPED = cast<OMPExecutableDirective>(S); 2050 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 2051 const Stmt *SS = ICS->getCapturedStmt(); 2052 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 2053 OMPLoopNestStack.clear(); 2054 if (AS) 2055 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 2056 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 2057 SourceLocToDebugLoc(R.getEnd())); 2058 else 2059 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2060 SourceLocToDebugLoc(R.getEnd())); 2061 2062 // If there are any cleanups between here and the loop-exit scope, 2063 // create a block to stage a loop exit along. 2064 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2065 if (RequiresCleanup) 2066 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 2067 2068 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 2069 2070 // Emit condition. 2071 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 2072 if (ExitBlock != LoopExit.getBlock()) { 2073 EmitBlock(ExitBlock); 2074 EmitBranchThroughCleanup(LoopExit); 2075 } 2076 2077 EmitBlock(LoopBody); 2078 incrementProfileCounter(&S); 2079 2080 // Create a block for the increment. 2081 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 2082 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2083 2084 BodyGen(*this); 2085 2086 // Emit "IV = IV + 1" and a back-edge to the condition block. 2087 EmitBlock(Continue.getBlock()); 2088 EmitIgnoredExpr(IncExpr); 2089 PostIncGen(*this); 2090 BreakContinueStack.pop_back(); 2091 EmitBranch(CondBlock); 2092 LoopStack.pop(); 2093 // Emit the fall-through block. 2094 EmitBlock(LoopExit.getBlock()); 2095 } 2096 2097 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 2098 if (!HaveInsertPoint()) 2099 return false; 2100 // Emit inits for the linear variables. 2101 bool HasLinears = false; 2102 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2103 for (const Expr *Init : C->inits()) { 2104 HasLinears = true; 2105 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 2106 if (const auto *Ref = 2107 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 2108 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 2109 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 2110 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 2111 CapturedStmtInfo->lookup(OrigVD) != nullptr, 2112 VD->getInit()->getType(), VK_LValue, 2113 VD->getInit()->getExprLoc()); 2114 EmitExprAsInit( 2115 &DRE, VD, 2116 MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()), 2117 /*capturedByInit=*/false); 2118 EmitAutoVarCleanups(Emission); 2119 } else { 2120 EmitVarDecl(*VD); 2121 } 2122 } 2123 // Emit the linear steps for the linear clauses. 2124 // If a step is not constant, it is pre-calculated before the loop. 2125 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 2126 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 2127 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 2128 // Emit calculation of the linear step. 2129 EmitIgnoredExpr(CS); 2130 } 2131 } 2132 return HasLinears; 2133 } 2134 2135 void CodeGenFunction::EmitOMPLinearClauseFinal( 2136 const OMPLoopDirective &D, 2137 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2138 if (!HaveInsertPoint()) 2139 return; 2140 llvm::BasicBlock *DoneBB = nullptr; 2141 // Emit the final values of the linear variables. 
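// Illustrative example (hypothetical directive): for
//   #pragma omp simd linear(j : 2)
// each expression in C->finals() stores the value 'j' would have after the
// sequential loop, i.e. start + trip_count * 2, back into the original
// variable.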
2142 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2143 auto IC = C->varlist_begin();
2144 for (const Expr *F : C->finals()) {
2145 if (!DoneBB) {
2146 if (llvm::Value *Cond = CondGen(*this)) {
2147 // When the first post-update expression is found, emit the conditional
2148 // block if one was requested.
2149 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
2150 DoneBB = createBasicBlock(".omp.linear.pu.done");
2151 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2152 EmitBlock(ThenBB);
2153 }
2154 }
2155 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
2156 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2157 CapturedStmtInfo->lookup(OrigVD) != nullptr,
2158 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
2159 Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
2160 CodeGenFunction::OMPPrivateScope VarScope(*this);
2161 VarScope.addPrivate(OrigVD, OrigAddr);
2162 (void)VarScope.Privatize();
2163 EmitIgnoredExpr(F);
2164 ++IC;
2165 }
2166 if (const Expr *PostUpdate = C->getPostUpdateExpr())
2167 EmitIgnoredExpr(PostUpdate);
2168 }
2169 if (DoneBB)
2170 EmitBlock(DoneBB, /*IsFinished=*/true);
2171 }
2172
2173 static void emitAlignedClause(CodeGenFunction &CGF,
2174 const OMPExecutableDirective &D) {
2175 if (!CGF.HaveInsertPoint())
2176 return;
2177 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
2178 llvm::APInt ClauseAlignment(64, 0);
2179 if (const Expr *AlignmentExpr = Clause->getAlignment()) {
2180 auto *AlignmentCI =
2181 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
2182 ClauseAlignment = AlignmentCI->getValue();
2183 }
2184 for (const Expr *E : Clause->varlists()) {
2185 llvm::APInt Alignment(ClauseAlignment);
2186 if (Alignment == 0) {
2187 // OpenMP [2.8.1, Description]
2188 // If no optional parameter is specified, implementation-defined default
2189 // alignments for SIMD instructions on the target platforms are assumed.
2190 Alignment =
2191 CGF.getContext()
2192 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2193 E->getType()->getPointeeType()))
2194 .getQuantity();
2195 }
2196 assert((Alignment == 0 || Alignment.isPowerOf2()) &&
2197 "alignment is not a power of 2");
2198 if (Alignment != 0) {
2199 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
2200 CGF.emitAlignmentAssumption(
2201 PtrValue, E, /*No second loc needed*/ SourceLocation(),
2202 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
2203 }
2204 }
2205 }
2206 }
2207
2208 void CodeGenFunction::EmitOMPPrivateLoopCounters(
2209 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
2210 if (!HaveInsertPoint())
2211 return;
2212 auto I = S.private_counters().begin();
2213 for (const Expr *E : S.counters()) {
2214 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2215 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
2216 // Emit var without initialization.
2217 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 2218 EmitAutoVarCleanups(VarEmission); 2219 LocalDeclMap.erase(PrivateVD); 2220 (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress()); 2221 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 2222 VD->hasGlobalStorage()) { 2223 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 2224 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 2225 E->getType(), VK_LValue, E->getExprLoc()); 2226 (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this)); 2227 } else { 2228 (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress()); 2229 } 2230 ++I; 2231 } 2232 // Privatize extra loop counters used in loops for ordered(n) clauses. 2233 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 2234 if (!C->getNumForLoops()) 2235 continue; 2236 for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size(); 2237 I < E; ++I) { 2238 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 2239 const auto *VD = cast<VarDecl>(DRE->getDecl()); 2240 // Override only those variables that can be captured to avoid re-emission 2241 // of the variables declared within the loops. 2242 if (DRE->refersToEnclosingVariableOrCapture()) { 2243 (void)LoopScope.addPrivate( 2244 VD, CreateMemTemp(DRE->getType(), VD->getName())); 2245 } 2246 } 2247 } 2248 } 2249 2250 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 2251 const Expr *Cond, llvm::BasicBlock *TrueBlock, 2252 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 2253 if (!CGF.HaveInsertPoint()) 2254 return; 2255 { 2256 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 2257 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 2258 (void)PreCondScope.Privatize(); 2259 // Get initial values of real counters. 2260 for (const Expr *I : S.inits()) { 2261 CGF.EmitIgnoredExpr(I); 2262 } 2263 } 2264 // Create temp loop control variables with their init values to support 2265 // non-rectangular loops. 2266 CodeGenFunction::OMPMapVars PreCondVars; 2267 for (const Expr *E : S.dependent_counters()) { 2268 if (!E) 2269 continue; 2270 assert(!E->getType().getNonReferenceType()->isRecordType() && 2271 "dependent counter must not be an iterator."); 2272 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2273 Address CounterAddr = 2274 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2275 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2276 } 2277 (void)PreCondVars.apply(CGF); 2278 for (const Expr *E : S.dependent_inits()) { 2279 if (!E) 2280 continue; 2281 CGF.EmitIgnoredExpr(E); 2282 } 2283 // Check that loop is executed at least one time. 
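// Illustrative example: for 'for (int i = 0; i < n; ++i)' the precondition
// is '0 < n'; the branch emitted below skips the whole loop construct when
// the precondition does not hold.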
2284 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2285 PreCondVars.restore(CGF); 2286 } 2287 2288 void CodeGenFunction::EmitOMPLinearClause( 2289 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2290 if (!HaveInsertPoint()) 2291 return; 2292 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2293 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2294 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2295 for (const Expr *C : LoopDirective->counters()) { 2296 SIMDLCVs.insert( 2297 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2298 } 2299 } 2300 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2301 auto CurPrivate = C->privates().begin(); 2302 for (const Expr *E : C->varlists()) { 2303 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2304 const auto *PrivateVD = 2305 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2306 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2307 // Emit private VarDecl with copy init. 2308 EmitVarDecl(*PrivateVD); 2309 bool IsRegistered = 2310 PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD)); 2311 assert(IsRegistered && "linear var already registered as private"); 2312 // Silence the warning about unused variable. 2313 (void)IsRegistered; 2314 } else { 2315 EmitVarDecl(*PrivateVD); 2316 } 2317 ++CurPrivate; 2318 } 2319 } 2320 } 2321 2322 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2323 const OMPExecutableDirective &D) { 2324 if (!CGF.HaveInsertPoint()) 2325 return; 2326 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2327 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2328 /*ignoreResult=*/true); 2329 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2330 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2331 // In presence of finite 'safelen', it may be unsafe to mark all 2332 // the memory instructions parallel, because loop-carried 2333 // dependences of 'safelen' iterations are possible. 2334 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2335 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2336 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2337 /*ignoreResult=*/true); 2338 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2339 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2340 // In presence of finite 'safelen', it may be unsafe to mark all 2341 // the memory instructions parallel, because loop-carried 2342 // dependences of 'safelen' iterations are possible. 2343 CGF.LoopStack.setParallel(/*Enable=*/false); 2344 } 2345 } 2346 2347 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) { 2348 // Walk clauses and process safelen/lastprivate. 2349 LoopStack.setParallel(/*Enable=*/true); 2350 LoopStack.setVectorizeEnable(); 2351 emitSimdlenSafelenClause(*this, D); 2352 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2353 if (C->getKind() == OMPC_ORDER_concurrent) 2354 LoopStack.setParallel(/*Enable=*/true); 2355 if ((D.getDirectiveKind() == OMPD_simd || 2356 (getLangOpts().OpenMPSimd && 2357 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2358 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2359 [](const OMPReductionClause *C) { 2360 return C->getModifier() == OMPC_REDUCTION_inscan; 2361 })) 2362 // Disable parallel access in case of prefix sum. 
2363 LoopStack.setParallel(/*Enable=*/false);
2364 }
2365
2366 void CodeGenFunction::EmitOMPSimdFinal(
2367 const OMPLoopDirective &D,
2368 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2369 if (!HaveInsertPoint())
2370 return;
2371 llvm::BasicBlock *DoneBB = nullptr;
2372 auto IC = D.counters().begin();
2373 auto IPC = D.private_counters().begin();
2374 for (const Expr *F : D.finals()) {
2375 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
2376 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
2377 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
2378 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
2379 OrigVD->hasGlobalStorage() || CED) {
2380 if (!DoneBB) {
2381 if (llvm::Value *Cond = CondGen(*this)) {
2382 // When the first post-update expression is found, emit the conditional
2383 // block if one was requested.
2384 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
2385 DoneBB = createBasicBlock(".omp.final.done");
2386 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2387 EmitBlock(ThenBB);
2388 }
2389 }
2390 Address OrigAddr = Address::invalid();
2391 if (CED) {
2392 OrigAddr =
2393 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
2394 } else {
2395 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
2396 /*RefersToEnclosingVariableOrCapture=*/false,
2397 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
2398 OrigAddr = EmitLValue(&DRE).getAddress(*this);
2399 }
2400 OMPPrivateScope VarScope(*this);
2401 VarScope.addPrivate(OrigVD, OrigAddr);
2402 (void)VarScope.Privatize();
2403 EmitIgnoredExpr(F);
2404 }
2405 ++IC;
2406 ++IPC;
2407 }
2408 if (DoneBB)
2409 EmitBlock(DoneBB, /*IsFinished=*/true);
2410 }
2411
2412 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
2413 const OMPLoopDirective &S,
2414 CodeGenFunction::JumpDest LoopExit) {
2415 CGF.EmitOMPLoopBody(S, LoopExit);
2416 CGF.EmitStopPoint(&S);
2417 }
2418
2419 /// Emit a helper variable and return the corresponding lvalue.
2420 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2421 const DeclRefExpr *Helper) { 2422 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2423 CGF.EmitVarDecl(*VDecl); 2424 return CGF.EmitLValue(Helper); 2425 } 2426 2427 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2428 const RegionCodeGenTy &SimdInitGen, 2429 const RegionCodeGenTy &BodyCodeGen) { 2430 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2431 PrePostActionTy &) { 2432 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2433 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2434 SimdInitGen(CGF); 2435 2436 BodyCodeGen(CGF); 2437 }; 2438 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2439 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2440 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2441 2442 BodyCodeGen(CGF); 2443 }; 2444 const Expr *IfCond = nullptr; 2445 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2446 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2447 if (CGF.getLangOpts().OpenMP >= 50 && 2448 (C->getNameModifier() == OMPD_unknown || 2449 C->getNameModifier() == OMPD_simd)) { 2450 IfCond = C->getCondition(); 2451 break; 2452 } 2453 } 2454 } 2455 if (IfCond) { 2456 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2457 } else { 2458 RegionCodeGenTy ThenRCG(ThenGen); 2459 ThenRCG(CGF); 2460 } 2461 } 2462 2463 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2464 PrePostActionTy &Action) { 2465 Action.Enter(CGF); 2466 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2467 "Expected simd directive"); 2468 OMPLoopScope PreInitScope(CGF, S); 2469 // if (PreCond) { 2470 // for (IV in 0..LastIteration) BODY; 2471 // <Final counter/linear vars updates>; 2472 // } 2473 // 2474 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2475 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2476 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2477 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2478 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2479 } 2480 2481 // Emit: if (PreCond) - begin. 2482 // If the condition constant folds and can be elided, avoid emitting the 2483 // whole loop. 2484 bool CondConstant; 2485 llvm::BasicBlock *ContBlock = nullptr; 2486 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2487 if (!CondConstant) 2488 return; 2489 } else { 2490 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2491 ContBlock = CGF.createBasicBlock("simd.if.end"); 2492 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2493 CGF.getProfileCount(&S)); 2494 CGF.EmitBlock(ThenBlock); 2495 CGF.incrementProfileCounter(&S); 2496 } 2497 2498 // Emit the loop iteration variable. 2499 const Expr *IVExpr = S.getIterationVariable(); 2500 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2501 CGF.EmitVarDecl(*IVDecl); 2502 CGF.EmitIgnoredExpr(S.getInit()); 2503 2504 // Emit the iterations count variable. 2505 // If it is not a variable, Sema decided to calculate iterations count on 2506 // each iteration (e.g., it is foldable into a constant). 2507 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2508 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2509 // Emit calculation of the iterations count. 
2510 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
2511 }
2512
2513 emitAlignedClause(CGF, S);
2514 (void)CGF.EmitOMPLinearClauseInit(S);
2515 {
2516 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2517 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
2518 CGF.EmitOMPLinearClause(S, LoopScope);
2519 CGF.EmitOMPPrivateClause(S, LoopScope);
2520 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2521 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
2522 CGF, S, CGF.EmitLValue(S.getIterationVariable()));
2523 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2524 (void)LoopScope.Privatize();
2525 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
2526 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
2527
2528 emitCommonSimdLoop(
2529 CGF, S,
2530 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2531 CGF.EmitOMPSimdInit(S);
2532 },
2533 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2534 CGF.EmitOMPInnerLoop(
2535 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
2536 [&S](CodeGenFunction &CGF) {
2537 emitOMPLoopBodyWithStopPoint(CGF, S,
2538 CodeGenFunction::JumpDest());
2539 },
2540 [](CodeGenFunction &) {});
2541 });
2542 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
2543 // Emit final copy of the lastprivate variables at the end of loops.
2544 if (HasLastprivateClause)
2545 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
2546 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
2547 emitPostUpdateForReductionClause(CGF, S,
2548 [](CodeGenFunction &) { return nullptr; });
2549 }
2550 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
2551 // Emit: if (PreCond) - end.
2552 if (ContBlock) {
2553 CGF.EmitBranch(ContBlock);
2554 CGF.EmitBlock(ContBlock, true);
2555 }
2556 }
2557
2558 static bool isSupportedByOpenMPIRBuilder(const OMPExecutableDirective &S) {
2559 // Check for unsupported clauses.
2560 if (!S.clauses().empty()) {
2561 // Currently no clause is supported.
2562 return false;
2563 }
2564
2565 // Check if we have a statement with the ordered directive.
2566 // Visit the statement hierarchy to find a compound statement
2567 // with an ordered directive in it.
2568 if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
2569 if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
2570 for (const Stmt *SubStmt : SyntacticalLoop->children()) {
2571 if (!SubStmt)
2572 continue;
2573 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
2574 for (const Stmt *CSSubStmt : CS->children()) {
2575 if (!CSSubStmt)
2576 continue;
2577 if (isa<OMPOrderedDirective>(CSSubStmt)) {
2578 return false;
2579 }
2580 }
2581 }
2582 }
2583 }
2584 }
2585 return true;
2586 }
2587
2588 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
2589 bool UseOMPIRBuilder =
2590 CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
2591 if (UseOMPIRBuilder) {
2592 auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
2593 PrePostActionTy &) {
2594 // Use the OpenMPIRBuilder if enabled.
2595 if (UseOMPIRBuilder) {
2596 // Emit the associated statement and get its loop representation.
2597 llvm::DebugLoc DL = SourceLocToDebugLoc(S.getBeginLoc()); 2598 const Stmt *Inner = S.getRawStmt(); 2599 llvm::CanonicalLoopInfo *CLI = 2600 EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 2601 2602 llvm::OpenMPIRBuilder &OMPBuilder = 2603 CGM.getOpenMPRuntime().getOMPBuilder(); 2604 // Add SIMD specific metadata 2605 OMPBuilder.applySimd(DL, CLI); 2606 return; 2607 } 2608 }; 2609 { 2610 auto LPCRegion = 2611 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2612 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2613 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, 2614 CodeGenIRBuilder); 2615 } 2616 return; 2617 } 2618 2619 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2620 OMPFirstScanLoop = true; 2621 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2622 emitOMPSimdRegion(CGF, S, Action); 2623 }; 2624 { 2625 auto LPCRegion = 2626 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2627 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2628 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2629 } 2630 // Check for outer lastprivate conditional update. 2631 checkForLastprivateConditionalUpdate(*this, S); 2632 } 2633 2634 void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) { 2635 // Emit the de-sugared statement. 2636 OMPTransformDirectiveScopeRAII TileScope(*this, &S); 2637 EmitStmt(S.getTransformedStmt()); 2638 } 2639 2640 void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) { 2641 bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder; 2642 2643 if (UseOMPIRBuilder) { 2644 auto DL = SourceLocToDebugLoc(S.getBeginLoc()); 2645 const Stmt *Inner = S.getRawStmt(); 2646 2647 // Consume nested loop. Clear the entire remaining loop stack because a 2648 // fully unrolled loop is non-transformable. For partial unrolling the 2649 // generated outer loop is pushed back to the stack. 2650 llvm::CanonicalLoopInfo *CLI = EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 2651 OMPLoopNestStack.clear(); 2652 2653 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 2654 2655 bool NeedsUnrolledCLI = ExpectedOMPLoopDepth >= 1; 2656 llvm::CanonicalLoopInfo *UnrolledCLI = nullptr; 2657 2658 if (S.hasClausesOfKind<OMPFullClause>()) { 2659 assert(ExpectedOMPLoopDepth == 0); 2660 OMPBuilder.unrollLoopFull(DL, CLI); 2661 } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) { 2662 uint64_t Factor = 0; 2663 if (Expr *FactorExpr = PartialClause->getFactor()) { 2664 Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue(); 2665 assert(Factor >= 1 && "Only positive factors are valid"); 2666 } 2667 OMPBuilder.unrollLoopPartial(DL, CLI, Factor, 2668 NeedsUnrolledCLI ? &UnrolledCLI : nullptr); 2669 } else { 2670 OMPBuilder.unrollLoopHeuristic(DL, CLI); 2671 } 2672 2673 assert((!NeedsUnrolledCLI || UnrolledCLI) && 2674 "NeedsUnrolledCLI implies UnrolledCLI to be set"); 2675 if (UnrolledCLI) 2676 OMPLoopNestStack.push_back(UnrolledCLI); 2677 2678 return; 2679 } 2680 2681 // This function is only called if the unrolled loop is not consumed by any 2682 // other loop-associated construct. Such a loop-associated construct will have 2683 // used the transformed AST. 2684 2685 // Set the unroll metadata for the next emitted loop. 
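// Illustrative example (assumed lowering): on this path
//   #pragma omp unroll partial(4)
// marks the next emitted loop with unroll-enable and an unroll count of 4
// (conceptually llvm.loop.unroll.count metadata); the actual unrolling is
// left to the LLVM loop unroller.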
2686 LoopStack.setUnrollState(LoopAttributes::Enable); 2687 2688 if (S.hasClausesOfKind<OMPFullClause>()) { 2689 LoopStack.setUnrollState(LoopAttributes::Full); 2690 } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) { 2691 if (Expr *FactorExpr = PartialClause->getFactor()) { 2692 uint64_t Factor = 2693 FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue(); 2694 assert(Factor >= 1 && "Only positive factors are valid"); 2695 LoopStack.setUnrollCount(Factor); 2696 } 2697 } 2698 2699 EmitStmt(S.getAssociatedStmt()); 2700 } 2701 2702 void CodeGenFunction::EmitOMPOuterLoop( 2703 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2704 CodeGenFunction::OMPPrivateScope &LoopScope, 2705 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2706 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2707 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2708 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2709 2710 const Expr *IVExpr = S.getIterationVariable(); 2711 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2712 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2713 2714 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2715 2716 // Start the loop with a block that tests the condition. 2717 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2718 EmitBlock(CondBlock); 2719 const SourceRange R = S.getSourceRange(); 2720 OMPLoopNestStack.clear(); 2721 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2722 SourceLocToDebugLoc(R.getEnd())); 2723 2724 llvm::Value *BoolCondVal = nullptr; 2725 if (!DynamicOrOrdered) { 2726 // UB = min(UB, GlobalUB) or 2727 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2728 // 'distribute parallel for') 2729 EmitIgnoredExpr(LoopArgs.EUB); 2730 // IV = LB 2731 EmitIgnoredExpr(LoopArgs.Init); 2732 // IV < UB 2733 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2734 } else { 2735 BoolCondVal = 2736 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2737 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2738 } 2739 2740 // If there are any cleanups between here and the loop-exit scope, 2741 // create a block to stage a loop exit along. 2742 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2743 if (LoopScope.requiresCleanups()) 2744 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2745 2746 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2747 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2748 if (ExitBlock != LoopExit.getBlock()) { 2749 EmitBlock(ExitBlock); 2750 EmitBranchThroughCleanup(LoopExit); 2751 } 2752 EmitBlock(LoopBody); 2753 2754 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2755 // LB for loop condition and emitted it above). 2756 if (DynamicOrOrdered) 2757 EmitIgnoredExpr(LoopArgs.Init); 2758 2759 // Create a block for the increment. 2760 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2761 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2762 2763 emitCommonSimdLoop( 2764 *this, S, 2765 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2766 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2767 // with dynamic/guided scheduling and without ordered clause. 
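// Illustrative example: with
//   #pragma omp for schedule(dynamic)
// the schedule is dynamic and (by default) nonmonotonic, IsMonotonic is
// false, and the loads and stores of the loop body may be marked parallel.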
2768 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2769 CGF.LoopStack.setParallel(!IsMonotonic); 2770 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2771 if (C->getKind() == OMPC_ORDER_concurrent) 2772 CGF.LoopStack.setParallel(/*Enable=*/true); 2773 } else { 2774 CGF.EmitOMPSimdInit(S); 2775 } 2776 }, 2777 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2778 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2779 SourceLocation Loc = S.getBeginLoc(); 2780 // when 'distribute' is not combined with a 'for': 2781 // while (idx <= UB) { BODY; ++idx; } 2782 // when 'distribute' is combined with a 'for' 2783 // (e.g. 'distribute parallel for') 2784 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2785 CGF.EmitOMPInnerLoop( 2786 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2787 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2788 CodeGenLoop(CGF, S, LoopExit); 2789 }, 2790 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2791 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2792 }); 2793 }); 2794 2795 EmitBlock(Continue.getBlock()); 2796 BreakContinueStack.pop_back(); 2797 if (!DynamicOrOrdered) { 2798 // Emit "LB = LB + Stride", "UB = UB + Stride". 2799 EmitIgnoredExpr(LoopArgs.NextLB); 2800 EmitIgnoredExpr(LoopArgs.NextUB); 2801 } 2802 2803 EmitBranch(CondBlock); 2804 OMPLoopNestStack.clear(); 2805 LoopStack.pop(); 2806 // Emit the fall-through block. 2807 EmitBlock(LoopExit.getBlock()); 2808 2809 // Tell the runtime we are done. 2810 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2811 if (!DynamicOrOrdered) 2812 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2813 S.getDirectiveKind()); 2814 }; 2815 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2816 } 2817 2818 void CodeGenFunction::EmitOMPForOuterLoop( 2819 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2820 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2821 const OMPLoopArguments &LoopArgs, 2822 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2823 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2824 2825 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2826 const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule); 2827 2828 assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule, 2829 LoopArgs.Chunk != nullptr)) && 2830 "static non-chunked schedule does not need outer loop"); 2831 2832 // Emit outer loop. 2833 // 2834 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2835 // When schedule(dynamic,chunk_size) is specified, the iterations are 2836 // distributed to threads in the team in chunks as the threads request them. 2837 // Each thread executes a chunk of iterations, then requests another chunk, 2838 // until no chunks remain to be distributed. Each chunk contains chunk_size 2839 // iterations, except for the last chunk to be distributed, which may have 2840 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2841 // 2842 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2843 // to threads in the team in chunks as the executing threads request them. 2844 // Each thread executes a chunk of iterations, then requests another chunk, 2845 // until no chunks remain to be assigned. 
For a chunk_size of 1, the size of
2846 //   each chunk is proportional to the number of unassigned iterations divided
2847 //   by the number of threads in the team, decreasing to 1. For a chunk_size
2848 //   with value k (greater than 1), the size of each chunk is determined in the
2849 //   same way, with the restriction that the chunks do not contain fewer than k
2850 //   iterations (except for the last chunk to be assigned, which may have fewer
2851 //   than k iterations).
2852 //
2853 //   When schedule(auto) is specified, the decision regarding scheduling is
2854 //   delegated to the compiler and/or runtime system. The programmer gives the
2855 //   implementation the freedom to choose any possible mapping of iterations to
2856 //   threads in the team.
2857 //
2858 //   When schedule(runtime) is specified, the decision regarding scheduling is
2859 //   deferred until run time, and the schedule and chunk size are taken from the
2860 //   run-sched-var ICV. If the ICV is set to auto, the schedule is
2861 //   implementation defined.
2862 //
2863 //   while(__kmpc_dispatch_next(&LB, &UB)) {
2864 //     idx = LB;
2865 //     while (idx <= UB) { BODY; ++idx;
2866 //       __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
2867 //     } // inner loop
2868 //   }
2869 //
2870 //   OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2871 //   When schedule(static, chunk_size) is specified, iterations are divided into
2872 //   chunks of size chunk_size, and the chunks are assigned to the threads in
2873 //   the team in a round-robin fashion in the order of the thread number.
2874 //
2875 //   while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
2876 //     while (idx <= UB) { BODY; ++idx; } // inner loop
2877 //     LB = LB + ST;
2878 //     UB = UB + ST;
2879 //   }
2880 //
2881 
2882   const Expr *IVExpr = S.getIterationVariable();
2883   const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2884   const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2885 
2886   if (DynamicOrOrdered) {
2887     const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
2888         CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
2889     llvm::Value *LBVal = DispatchBounds.first;
2890     llvm::Value *UBVal = DispatchBounds.second;
2891     CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
2892                                                               LoopArgs.Chunk};
2893     RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
2894                            IVSigned, Ordered, DispatchRTInputValues);
2895   } else {
2896     CGOpenMPRuntime::StaticRTInput StaticInit(
2897         IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
2898         LoopArgs.ST, LoopArgs.Chunk);
2899     RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
2900                          ScheduleKind, StaticInit);
2901   }
2902 
2903   auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
2904                                     const unsigned IVSize,
2905                                     const bool IVSigned) {
2906     if (Ordered) {
2907       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
2908                                                             IVSigned);
2909     }
2910   };
2911 
2912   OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
2913                                  LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
2914   OuterLoopArgs.IncExpr = S.getInc();
2915   OuterLoopArgs.Init = S.getInit();
2916   OuterLoopArgs.Cond = S.getCond();
2917   OuterLoopArgs.NextLB = S.getNextLowerBound();
2918   OuterLoopArgs.NextUB = S.getNextUpperBound();
2919   EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
2920                    emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
2921 }
2922 
2923 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
2924                              const unsigned IVSize, const bool IVSigned) {}
2925 
2926 void CodeGenFunction::EmitOMPDistributeOuterLoop(
2927     OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2928     OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2929     const CodeGenLoopTy &CodeGenLoopContent) {
2930 
2931   CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2932 
2933   // Emit outer loop.
2934   // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
2935   // dynamic.
2936   //
2937 
2938   const Expr *IVExpr = S.getIterationVariable();
2939   const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2940   const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2941 
2942   CGOpenMPRuntime::StaticRTInput StaticInit(
2943       IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2944       LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2945   RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2946 
2947   // For combined 'distribute' and 'for', the increment expression of distribute
2948   // is stored in DistInc. For 'distribute' alone, it is in Inc.
2949   Expr *IncExpr;
2950   if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2951     IncExpr = S.getDistInc();
2952   else
2953     IncExpr = S.getInc();
2954 
2955   // This routine is shared by 'omp distribute parallel for' and
2956   // 'omp distribute': select the right EUB expression depending on the
2957   // directive.
2958   OMPLoopArguments OuterLoopArgs;
2959   OuterLoopArgs.LB = LoopArgs.LB;
2960   OuterLoopArgs.UB = LoopArgs.UB;
2961   OuterLoopArgs.ST = LoopArgs.ST;
2962   OuterLoopArgs.IL = LoopArgs.IL;
2963   OuterLoopArgs.Chunk = LoopArgs.Chunk;
2964   OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2965                           ? S.getCombinedEnsureUpperBound()
2966                           : S.getEnsureUpperBound();
2967   OuterLoopArgs.IncExpr = IncExpr;
2968   OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2969                            ? S.getCombinedInit()
2970                            : S.getInit();
2971   OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2972                            ? S.getCombinedCond()
2973                            : S.getCond();
2974   OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2975                              ? S.getCombinedNextLowerBound()
2976                              : S.getNextLowerBound();
2977   OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2978                              ? S.getCombinedNextUpperBound()
2979                              : S.getNextUpperBound();
2980 
2981   EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2982                    LoopScope, OuterLoopArgs, CodeGenLoopContent,
2983                    emitEmptyOrdered);
2984 }
2985 
2986 static std::pair<LValue, LValue>
2987 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2988                                      const OMPExecutableDirective &S) {
2989   const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2990   LValue LB =
2991       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2992   LValue UB =
2993       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2994 
2995   // When composing 'distribute' with 'for' (e.g. as in 'distribute
2996   // parallel for') we need to use the 'distribute'
2997   // chunk lower and upper bounds rather than the whole loop iteration
2998   // space. These are parameters to the outlined function for 'parallel'
2999   // and we copy the bounds of the previous schedule into
3000   // the current ones.
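  // For illustration (hypothetical numbers): if this team's 'distribute'
  // chunk covers iterations [128, 255], the inner 'for' loop below starts
  // from LB = 128 and UB = 255 rather than from the full iteration space
  // [0, LastIteration].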
3001   LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
3002   LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
3003   llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
3004       PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
3005   PrevLBVal = CGF.EmitScalarConversion(
3006       PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
3007       LS.getIterationVariable()->getType(),
3008       LS.getPrevLowerBoundVariable()->getExprLoc());
3009   llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
3010       PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
3011   PrevUBVal = CGF.EmitScalarConversion(
3012       PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
3013       LS.getIterationVariable()->getType(),
3014       LS.getPrevUpperBoundVariable()->getExprLoc());
3015 
3016   CGF.EmitStoreOfScalar(PrevLBVal, LB);
3017   CGF.EmitStoreOfScalar(PrevUBVal, UB);
3018 
3019   return {LB, UB};
3020 }
3021 
3022 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), then
3023 /// we need to use the LB and UB expressions generated by the worksharing
3024 /// code generation support, whereas in non-combined situations we would
3025 /// just emit 0 and the LastIteration expression.
3026 /// This function is necessary due to the difference of the LB and UB
3027 /// types for the RT emission routines for 'for_static_init' and
3028 /// 'for_dispatch_init'.
3029 static std::pair<llvm::Value *, llvm::Value *>
3030 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
3031                                         const OMPExecutableDirective &S,
3032                                         Address LB, Address UB) {
3033   const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3034   const Expr *IVExpr = LS.getIterationVariable();
3035   // When implementing a dynamic schedule for a 'for' combined with a
3036   // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
3037   // is not normalized as each team only executes its own assigned
3038   // distribute chunk.
3039   QualType IteratorTy = IVExpr->getType();
3040   llvm::Value *LBVal =
3041       CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3042   llvm::Value *UBVal =
3043       CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3044   return {LBVal, UBVal};
3045 }
3046 
3047 static void emitDistributeParallelForDistributeInnerBoundParams(
3048     CodeGenFunction &CGF, const OMPExecutableDirective &S,
3049     llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
3050   const auto &Dir = cast<OMPLoopDirective>(S);
3051   LValue LB =
3052       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
3053   llvm::Value *LBCast =
3054       CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
3055                                 CGF.SizeTy, /*isSigned=*/false);
3056   CapturedVars.push_back(LBCast);
3057   LValue UB =
3058       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
3059 
3060   llvm::Value *UBCast =
3061       CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
3062                                 CGF.SizeTy, /*isSigned=*/false);
3063   CapturedVars.push_back(UBCast);
3064 }
3065 
3066 static void
3067 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
3068                                  const OMPLoopDirective &S,
3069                                  CodeGenFunction::JumpDest LoopExit) {
3070   auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
3071                                          PrePostActionTy &Action) {
3072     Action.Enter(CGF);
3073     bool HasCancel = false;
3074     if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
3075       if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
3076         HasCancel = D->hasCancel();
3077       else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
3078         HasCancel =
            D->hasCancel();
3079       else if (const auto *D =
3080                    dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
3081         HasCancel = D->hasCancel();
3082     }
3083     CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
3084                                                      HasCancel);
3085     CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
3086                                emitDistributeParallelForInnerBounds,
3087                                emitDistributeParallelForDispatchBounds);
3088   };
3089 
3090   emitCommonOMPParallelDirective(
3091       CGF, S,
3092       isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
3093       CGInlinedWorksharingLoop,
3094       emitDistributeParallelForDistributeInnerBoundParams);
3095 }
3096 
3097 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
3098     const OMPDistributeParallelForDirective &S) {
3099   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3100     CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3101                               S.getDistInc());
3102   };
3103   OMPLexicalScope Scope(*this, S, OMPD_parallel);
3104   CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3105 }
3106 
3107 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
3108     const OMPDistributeParallelForSimdDirective &S) {
3109   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3110     CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3111                               S.getDistInc());
3112   };
3113   OMPLexicalScope Scope(*this, S, OMPD_parallel);
3114   CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3115 }
3116 
3117 void CodeGenFunction::EmitOMPDistributeSimdDirective(
3118     const OMPDistributeSimdDirective &S) {
3119   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3120     CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3121   };
3122   OMPLexicalScope Scope(*this, S, OMPD_unknown);
3123   CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3124 }
3125 
3126 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
3127     CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
3128   // Emit SPMD target simd region as a standalone region.
3129   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3130     emitOMPSimdRegion(CGF, S, Action);
3131   };
3132   llvm::Function *Fn;
3133   llvm::Constant *Addr;
3134   // Emit target region as a standalone region.
3135   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3136       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3137   assert(Fn && Addr && "Target device function emission failed.");
3138 }
3139 
3140 void CodeGenFunction::EmitOMPTargetSimdDirective(
3141     const OMPTargetSimdDirective &S) {
3142   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3143     emitOMPSimdRegion(CGF, S, Action);
3144   };
3145   emitCommonOMPTargetDirective(*this, S, CodeGen);
3146 }
3147 
3148 namespace {
3149 struct ScheduleKindModifiersTy {
3150   OpenMPScheduleClauseKind Kind;
3151   OpenMPScheduleClauseModifier M1;
3152   OpenMPScheduleClauseModifier M2;
3153   ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
3154                           OpenMPScheduleClauseModifier M1,
3155                           OpenMPScheduleClauseModifier M2)
3156       : Kind(Kind), M1(M1), M2(M2) {}
3157 };
3158 } // namespace
3159 
3160 bool CodeGenFunction::EmitOMPWorksharingLoop(
3161     const OMPLoopDirective &S, Expr *EUB,
3162     const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3163     const CodeGenDispatchBoundsTy &CGDispatchBounds) {
3164   // Emit the loop iteration variable.
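  // (For a hypothetical collapsed nest such as '#pragma omp for collapse(2)',
  // this is the single logical induction variable that Sema created to cover
  // the whole collapsed iteration space; the original counters are recomputed
  // from it.)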
3165 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3166 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3167 EmitVarDecl(*IVDecl); 3168 3169 // Emit the iterations count variable. 3170 // If it is not a variable, Sema decided to calculate iterations count on each 3171 // iteration (e.g., it is foldable into a constant). 3172 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3173 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3174 // Emit calculation of the iterations count. 3175 EmitIgnoredExpr(S.getCalcLastIteration()); 3176 } 3177 3178 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 3179 3180 bool HasLastprivateClause; 3181 // Check pre-condition. 3182 { 3183 OMPLoopScope PreInitScope(*this, S); 3184 // Skip the entire loop if we don't meet the precondition. 3185 // If the condition constant folds and can be elided, avoid emitting the 3186 // whole loop. 3187 bool CondConstant; 3188 llvm::BasicBlock *ContBlock = nullptr; 3189 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3190 if (!CondConstant) 3191 return false; 3192 } else { 3193 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 3194 ContBlock = createBasicBlock("omp.precond.end"); 3195 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3196 getProfileCount(&S)); 3197 EmitBlock(ThenBlock); 3198 incrementProfileCounter(&S); 3199 } 3200 3201 RunCleanupsScope DoacrossCleanupScope(*this); 3202 bool Ordered = false; 3203 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 3204 if (OrderedClause->getNumForLoops()) 3205 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 3206 else 3207 Ordered = true; 3208 } 3209 3210 llvm::DenseSet<const Expr *> EmittedFinals; 3211 emitAlignedClause(*this, S); 3212 bool HasLinears = EmitOMPLinearClauseInit(S); 3213 // Emit helper vars inits. 3214 3215 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 3216 LValue LB = Bounds.first; 3217 LValue UB = Bounds.second; 3218 LValue ST = 3219 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3220 LValue IL = 3221 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3222 3223 // Emit 'then' code. 3224 { 3225 OMPPrivateScope LoopScope(*this); 3226 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 3227 // Emit implicit barrier to synchronize threads and avoid data races on 3228 // initialization of firstprivate variables and post-update of 3229 // lastprivate variables. 3230 CGM.getOpenMPRuntime().emitBarrierCall( 3231 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3232 /*ForceSimpleCall=*/true); 3233 } 3234 EmitOMPPrivateClause(S, LoopScope); 3235 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 3236 *this, S, EmitLValue(S.getIterationVariable())); 3237 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3238 EmitOMPReductionClauseInit(S, LoopScope); 3239 EmitOMPPrivateLoopCounters(S, LoopScope); 3240 EmitOMPLinearClause(S, LoopScope); 3241 (void)LoopScope.Privatize(); 3242 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3243 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3244 3245 // Detect the loop schedule kind and chunk. 
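      // For example (hypothetical source): '#pragma omp for schedule(guided,
      // 8)' yields ScheduleKind.Schedule == OMPC_SCHEDULE_guided with
      // ChunkExpr pointing at the literal '8'; without a schedule clause, the
      // runtime-specific default is queried below.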
3246 const Expr *ChunkExpr = nullptr; 3247 OpenMPScheduleTy ScheduleKind; 3248 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 3249 ScheduleKind.Schedule = C->getScheduleKind(); 3250 ScheduleKind.M1 = C->getFirstScheduleModifier(); 3251 ScheduleKind.M2 = C->getSecondScheduleModifier(); 3252 ChunkExpr = C->getChunkSize(); 3253 } else { 3254 // Default behaviour for schedule clause. 3255 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 3256 *this, S, ScheduleKind.Schedule, ChunkExpr); 3257 } 3258 bool HasChunkSizeOne = false; 3259 llvm::Value *Chunk = nullptr; 3260 if (ChunkExpr) { 3261 Chunk = EmitScalarExpr(ChunkExpr); 3262 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 3263 S.getIterationVariable()->getType(), 3264 S.getBeginLoc()); 3265 Expr::EvalResult Result; 3266 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 3267 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 3268 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 3269 } 3270 } 3271 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3272 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3273 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 3274 // If the static schedule kind is specified or if the ordered clause is 3275 // specified, and if no monotonic modifier is specified, the effect will 3276 // be as if the monotonic modifier was specified. 3277 bool StaticChunkedOne = 3278 RT.isStaticChunked(ScheduleKind.Schedule, 3279 /* Chunked */ Chunk != nullptr) && 3280 HasChunkSizeOne && 3281 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3282 bool IsMonotonic = 3283 Ordered || 3284 (ScheduleKind.Schedule == OMPC_SCHEDULE_static && 3285 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic || 3286 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) || 3287 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 3288 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 3289 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 3290 /* Chunked */ Chunk != nullptr) || 3291 StaticChunkedOne) && 3292 !Ordered) { 3293 JumpDest LoopExit = 3294 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3295 emitCommonSimdLoop( 3296 *this, S, 3297 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3298 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3299 CGF.EmitOMPSimdInit(S); 3300 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 3301 if (C->getKind() == OMPC_ORDER_concurrent) 3302 CGF.LoopStack.setParallel(/*Enable=*/true); 3303 } 3304 }, 3305 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 3306 &S, ScheduleKind, LoopExit, 3307 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 3308 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 3309 // When no chunk_size is specified, the iteration space is divided 3310 // into chunks that are approximately equal in size, and at most 3311 // one chunk is distributed to each thread. Note that the size of 3312 // the chunks is unspecified in this case. 3313 CGOpenMPRuntime::StaticRTInput StaticInit( 3314 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 3315 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 3316 StaticChunkedOne ? 
Chunk : nullptr); 3317 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3318 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 3319 StaticInit); 3320 // UB = min(UB, GlobalUB); 3321 if (!StaticChunkedOne) 3322 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 3323 // IV = LB; 3324 CGF.EmitIgnoredExpr(S.getInit()); 3325 // For unchunked static schedule generate: 3326 // 3327 // while (idx <= UB) { 3328 // BODY; 3329 // ++idx; 3330 // } 3331 // 3332 // For static schedule with chunk one: 3333 // 3334 // while (IV <= PrevUB) { 3335 // BODY; 3336 // IV += ST; 3337 // } 3338 CGF.EmitOMPInnerLoop( 3339 S, LoopScope.requiresCleanups(), 3340 StaticChunkedOne ? S.getCombinedParForInDistCond() 3341 : S.getCond(), 3342 StaticChunkedOne ? S.getDistInc() : S.getInc(), 3343 [&S, LoopExit](CodeGenFunction &CGF) { 3344 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 3345 }, 3346 [](CodeGenFunction &) {}); 3347 }); 3348 EmitBlock(LoopExit.getBlock()); 3349 // Tell the runtime we are done. 3350 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3351 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3352 S.getDirectiveKind()); 3353 }; 3354 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 3355 } else { 3356 // Emit the outer loop, which requests its work chunk [LB..UB] from 3357 // runtime and runs the inner loop to process it. 3358 const OMPLoopArguments LoopArguments( 3359 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3360 IL.getAddress(*this), Chunk, EUB); 3361 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 3362 LoopArguments, CGDispatchBounds); 3363 } 3364 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3365 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 3366 return CGF.Builder.CreateIsNotNull( 3367 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3368 }); 3369 } 3370 EmitOMPReductionClauseFinal( 3371 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 3372 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 3373 : /*Parallel only*/ OMPD_parallel); 3374 // Emit post-update of the reduction variables if IsLastIter != 0. 3375 emitPostUpdateForReductionClause( 3376 *this, S, [IL, &S](CodeGenFunction &CGF) { 3377 return CGF.Builder.CreateIsNotNull( 3378 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3379 }); 3380 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3381 if (HasLastprivateClause) 3382 EmitOMPLastprivateClauseFinal( 3383 S, isOpenMPSimdDirective(S.getDirectiveKind()), 3384 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 3385 } 3386 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 3387 return CGF.Builder.CreateIsNotNull( 3388 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3389 }); 3390 DoacrossCleanupScope.ForceCleanup(); 3391 // We're now done with the loop, so jump to the continuation block. 3392 if (ContBlock) { 3393 EmitBranch(ContBlock); 3394 EmitBlock(ContBlock, /*IsFinished=*/true); 3395 } 3396 } 3397 return HasLastprivateClause; 3398 } 3399 3400 /// The following two functions generate expressions for the loop lower 3401 /// and upper bounds in case of static and dynamic (dispatch) schedule 3402 /// of the associated 'for' or 'distribute' loop. 
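/// As an illustrative sketch (not tied to a specific test): for a loop whose
/// iteration space is [0, LastIteration], the static path materializes the
/// Sema-created LB/UB helper variables, while the dispatch path passes the
/// constant 0 and the evaluated LastIteration expression to the runtime.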
3403 static std::pair<LValue, LValue>
3404 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3405   const auto &LS = cast<OMPLoopDirective>(S);
3406   LValue LB =
3407       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3408   LValue UB =
3409       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3410   return {LB, UB};
3411 }
3412 
3413 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3414 /// consider the lower and upper bound expressions generated by the
3415 /// worksharing loop support, but we use 0 and the iteration space size as
3416 /// constants.
3417 static std::pair<llvm::Value *, llvm::Value *>
3418 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3419                           Address LB, Address UB) {
3420   const auto &LS = cast<OMPLoopDirective>(S);
3421   const Expr *IVExpr = LS.getIterationVariable();
3422   const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3423   llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3424   llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3425   return {LBVal, UBVal};
3426 }
3427 
3428 /// Emits internal temp array declarations for the directive with inscan
3429 /// reductions.
3430 /// The code is the following:
3431 /// \code
3432 /// size num_iters = <num_iters>;
3433 /// <type> buffer[num_iters];
3434 /// \endcode
3435 static void emitScanBasedDirectiveDecls(
3436     CodeGenFunction &CGF, const OMPLoopDirective &S,
3437     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3438   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3439       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3440   SmallVector<const Expr *, 4> Shareds;
3441   SmallVector<const Expr *, 4> Privates;
3442   SmallVector<const Expr *, 4> ReductionOps;
3443   SmallVector<const Expr *, 4> CopyArrayTemps;
3444   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3445     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3446            "Only inscan reductions are expected.");
3447     Shareds.append(C->varlist_begin(), C->varlist_end());
3448     Privates.append(C->privates().begin(), C->privates().end());
3449     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3450     CopyArrayTemps.append(C->copy_array_temps().begin(),
3451                           C->copy_array_temps().end());
3452   }
3453   {
3454     // Emit buffers for each reduction variable.
3455     // ReductionCodeGen is required to correctly emit the code for array
3456     // reductions.
3457     ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3458     unsigned Count = 0;
3459     auto *ITA = CopyArrayTemps.begin();
3460     for (const Expr *IRef : Privates) {
3461       const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3462       // Emit variably modified arrays, used for arrays/array sections
3463       // reductions.
3464       if (PrivateVD->getType()->isVariablyModifiedType()) {
3465         RedCG.emitSharedOrigLValue(CGF, Count);
3466         RedCG.emitAggregateType(CGF, Count);
3467       }
3468       CodeGenFunction::OpaqueValueMapping DimMapping(
3469           CGF,
3470           cast<OpaqueValueExpr>(
3471               cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3472                   ->getSizeExpr()),
3473           RValue::get(OMPScanNumIterations));
3474       // Emit temp buffer.
3475       CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3476       ++ITA;
3477       ++Count;
3478     }
3479   }
3480 }
3481 
3482 /// Emits the code for the directive with inscan reductions.
3483 /// The code is the following:
3484 /// \code
3485 /// #pragma omp ...
3486 /// for (i: 0..<num_iters>) {
3487 ///   <input phase>;
3488 ///   buffer[i] = red;
3489 /// }
3490 /// #pragma omp master // in parallel region
3491 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3492 ///   for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3493 ///     buffer[cnt] op= buffer[cnt-pow(2,k)];
3494 /// #pragma omp barrier // in parallel region
3495 /// #pragma omp ...
3496 /// for (i: 0..<num_iters>) {
3497 ///   red = InclusiveScan ? buffer[i] : buffer[i-1];
3498 ///   <scan phase>;
3499 /// }
3500 /// \endcode
3501 static void emitScanBasedDirective(
3502     CodeGenFunction &CGF, const OMPLoopDirective &S,
3503     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3504     llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3505     llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3506   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3507       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3508   SmallVector<const Expr *, 4> Privates;
3509   SmallVector<const Expr *, 4> ReductionOps;
3510   SmallVector<const Expr *, 4> LHSs;
3511   SmallVector<const Expr *, 4> RHSs;
3512   SmallVector<const Expr *, 4> CopyArrayElems;
3513   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3514     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3515            "Only inscan reductions are expected.");
3516     Privates.append(C->privates().begin(), C->privates().end());
3517     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3518     LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3519     RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3520     CopyArrayElems.append(C->copy_array_elems().begin(),
3521                           C->copy_array_elems().end());
3522   }
3523   CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3524   {
3525     // Emit loop with input phase:
3526     // #pragma omp ...
3527     // for (i: 0..<num_iters>) {
3528     //   <input phase>;
3529     //   buffer[i] = red;
3530     // }
3531     CGF.OMPFirstScanLoop = true;
3532     CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3533     FirstGen(CGF);
3534   }
3535   // #pragma omp barrier // in parallel region
3536   auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
3537                     &ReductionOps,
3538                     &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) {
3539     Action.Enter(CGF);
3540     // Emit prefix reduction:
3541     // #pragma omp master // in parallel region
3542     // for (int k = 0; k != ceil(log2(n)); ++k)
3543     llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3544     llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3545     llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3546     llvm::Function *F =
3547         CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3548     llvm::Value *Arg =
3549         CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3550     llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3551     F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3552     LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3553     LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3554     llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3555         OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3556     auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3557     CGF.EmitBlock(LoopBB);
3558     auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3559     // size pow2k = 1;
3560     auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3561     Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3562     Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3563     // for (size i = n - 1; i >= 2 ^ k; --i)
3564     //   tmp[i] op= tmp[i-pow2k];
3565     llvm::BasicBlock *InnerLoopBB =
3566         CGF.createBasicBlock("omp.inner.log.scan.body");
3567     llvm::BasicBlock *InnerExitBB =
3568         CGF.createBasicBlock("omp.inner.log.scan.exit");
3569     llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3570     CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3571     CGF.EmitBlock(InnerLoopBB);
3572     auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3573     IVal->addIncoming(NMin1, LoopBB);
3574     {
3575       CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3576       auto *ILHS = LHSs.begin();
3577       auto *IRHS = RHSs.begin();
3578       for (const Expr *CopyArrayElem : CopyArrayElems) {
3579         const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3580         const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3581         Address LHSAddr = Address::invalid();
3582         {
3583           CodeGenFunction::OpaqueValueMapping IdxMapping(
3584               CGF,
3585               cast<OpaqueValueExpr>(
3586                   cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3587               RValue::get(IVal));
3588           LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
3589         }
3590         PrivScope.addPrivate(LHSVD, LHSAddr);
3591         Address RHSAddr = Address::invalid();
3592         {
3593           llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
3594           CodeGenFunction::OpaqueValueMapping IdxMapping(
3595               CGF,
3596               cast<OpaqueValueExpr>(
3597                   cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3598               RValue::get(OffsetIVal));
3599           RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
3600         }
3601         PrivScope.addPrivate(RHSVD, RHSAddr);
3602         ++ILHS;
3603         ++IRHS;
3604       }
3605       PrivScope.Privatize();
3606       CGF.CGM.getOpenMPRuntime().emitReduction(
3607           CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
3608           {/*WithNowait=*/true,
/*SimpleReduction=*/true, OMPD_unknown}); 3609 } 3610 llvm::Value *NextIVal = 3611 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3612 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3613 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3614 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3615 CGF.EmitBlock(InnerExitBB); 3616 llvm::Value *Next = 3617 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3618 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3619 // pow2k <<= 1; 3620 llvm::Value *NextPow2K = 3621 CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3622 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3623 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3624 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3625 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3626 CGF.EmitBlock(ExitBB); 3627 }; 3628 if (isOpenMPParallelDirective(S.getDirectiveKind())) { 3629 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3630 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3631 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3632 /*ForceSimpleCall=*/true); 3633 } else { 3634 RegionCodeGenTy RCG(CodeGen); 3635 RCG(CGF); 3636 } 3637 3638 CGF.OMPFirstScanLoop = false; 3639 SecondGen(CGF); 3640 } 3641 3642 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3643 const OMPLoopDirective &S, 3644 bool HasCancel) { 3645 bool HasLastprivates; 3646 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3647 [](const OMPReductionClause *C) { 3648 return C->getModifier() == OMPC_REDUCTION_inscan; 3649 })) { 3650 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3651 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3652 OMPLoopScope LoopScope(CGF, S); 3653 return CGF.EmitScalarExpr(S.getNumIterations()); 3654 }; 3655 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3656 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3657 CGF, S.getDirectiveKind(), HasCancel); 3658 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3659 emitForLoopBounds, 3660 emitDispatchForLoopBounds); 3661 // Emit an implicit barrier at the end. 
3662 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3663 OMPD_for); 3664 }; 3665 const auto &&SecondGen = [&S, HasCancel, 3666 &HasLastprivates](CodeGenFunction &CGF) { 3667 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3668 CGF, S.getDirectiveKind(), HasCancel); 3669 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3670 emitForLoopBounds, 3671 emitDispatchForLoopBounds); 3672 }; 3673 if (!isOpenMPParallelDirective(S.getDirectiveKind())) 3674 emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen); 3675 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3676 } else { 3677 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3678 HasCancel); 3679 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3680 emitForLoopBounds, 3681 emitDispatchForLoopBounds); 3682 } 3683 return HasLastprivates; 3684 } 3685 3686 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) { 3687 if (S.hasCancel()) 3688 return false; 3689 for (OMPClause *C : S.clauses()) { 3690 if (isa<OMPNowaitClause>(C)) 3691 continue; 3692 3693 if (auto *SC = dyn_cast<OMPScheduleClause>(C)) { 3694 if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) 3695 return false; 3696 if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) 3697 return false; 3698 switch (SC->getScheduleKind()) { 3699 case OMPC_SCHEDULE_auto: 3700 case OMPC_SCHEDULE_dynamic: 3701 case OMPC_SCHEDULE_runtime: 3702 case OMPC_SCHEDULE_guided: 3703 case OMPC_SCHEDULE_static: 3704 continue; 3705 case OMPC_SCHEDULE_unknown: 3706 return false; 3707 } 3708 } 3709 3710 return false; 3711 } 3712 3713 return true; 3714 } 3715 3716 static llvm::omp::ScheduleKind 3717 convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) { 3718 switch (ScheduleClauseKind) { 3719 case OMPC_SCHEDULE_unknown: 3720 return llvm::omp::OMP_SCHEDULE_Default; 3721 case OMPC_SCHEDULE_auto: 3722 return llvm::omp::OMP_SCHEDULE_Auto; 3723 case OMPC_SCHEDULE_dynamic: 3724 return llvm::omp::OMP_SCHEDULE_Dynamic; 3725 case OMPC_SCHEDULE_guided: 3726 return llvm::omp::OMP_SCHEDULE_Guided; 3727 case OMPC_SCHEDULE_runtime: 3728 return llvm::omp::OMP_SCHEDULE_Runtime; 3729 case OMPC_SCHEDULE_static: 3730 return llvm::omp::OMP_SCHEDULE_Static; 3731 } 3732 llvm_unreachable("Unhandled schedule kind"); 3733 } 3734 3735 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3736 bool HasLastprivates = false; 3737 bool UseOMPIRBuilder = 3738 CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S); 3739 auto &&CodeGen = [this, &S, &HasLastprivates, 3740 UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) { 3741 // Use the OpenMPIRBuilder if enabled. 3742 if (UseOMPIRBuilder) { 3743 bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>(); 3744 3745 llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default; 3746 llvm::Value *ChunkSize = nullptr; 3747 if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) { 3748 SchedKind = 3749 convertClauseKindToSchedKind(SchedClause->getScheduleKind()); 3750 if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize()) 3751 ChunkSize = EmitScalarExpr(ChunkSizeExpr); 3752 } 3753 3754 // Emit the associated statement and get its loop representation. 
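    // For illustration (hypothetical source): given
    //   #pragma omp for schedule(static, 2) nowait
    //   for (int i = 0; i < n; ++i) ...
    // SchedKind is OMP_SCHEDULE_Static, ChunkSize holds the emitted '2', and
    // NeedsBarrier is false when this OpenMPIRBuilder path is taken.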
3755 const Stmt *Inner = S.getRawStmt(); 3756 llvm::CanonicalLoopInfo *CLI = 3757 EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 3758 3759 llvm::OpenMPIRBuilder &OMPBuilder = 3760 CGM.getOpenMPRuntime().getOMPBuilder(); 3761 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3762 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3763 OMPBuilder.applyWorkshareLoop(Builder.getCurrentDebugLocation(), CLI, 3764 AllocaIP, NeedsBarrier, SchedKind, 3765 ChunkSize); 3766 return; 3767 } 3768 3769 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3770 }; 3771 { 3772 auto LPCRegion = 3773 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3774 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3775 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3776 S.hasCancel()); 3777 } 3778 3779 if (!UseOMPIRBuilder) { 3780 // Emit an implicit barrier at the end. 3781 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3782 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3783 } 3784 // Check for outer lastprivate conditional update. 3785 checkForLastprivateConditionalUpdate(*this, S); 3786 } 3787 3788 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3789 bool HasLastprivates = false; 3790 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3791 PrePostActionTy &) { 3792 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3793 }; 3794 { 3795 auto LPCRegion = 3796 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3797 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3798 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3799 } 3800 3801 // Emit an implicit barrier at the end. 3802 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3803 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3804 // Check for outer lastprivate conditional update. 3805 checkForLastprivateConditionalUpdate(*this, S); 3806 } 3807 3808 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3809 const Twine &Name, 3810 llvm::Value *Init = nullptr) { 3811 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3812 if (Init) 3813 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3814 return LVal; 3815 } 3816 3817 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3818 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3819 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3820 bool HasLastprivates = false; 3821 auto &&CodeGen = [&S, CapturedStmt, CS, 3822 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3823 const ASTContext &C = CGF.getContext(); 3824 QualType KmpInt32Ty = 3825 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3826 // Emit helper vars inits. 3827 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3828 CGF.Builder.getInt32(0)); 3829 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3830 ? CGF.Builder.getInt32(CS->size() - 1) 3831 : CGF.Builder.getInt32(0); 3832 LValue UB = 3833 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3834 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3835 CGF.Builder.getInt32(1)); 3836 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3837 CGF.Builder.getInt32(0)); 3838 // Loop counter. 
3839 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3840 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3841 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3842 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3843 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3844 // Generate condition for loop. 3845 BinaryOperator *Cond = BinaryOperator::Create( 3846 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary, 3847 S.getBeginLoc(), FPOptionsOverride()); 3848 // Increment for loop counter. 3849 UnaryOperator *Inc = UnaryOperator::Create( 3850 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary, 3851 S.getBeginLoc(), true, FPOptionsOverride()); 3852 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3853 // Iterate through all sections and emit a switch construct: 3854 // switch (IV) { 3855 // case 0: 3856 // <SectionStmt[0]>; 3857 // break; 3858 // ... 3859 // case <NumSection> - 1: 3860 // <SectionStmt[<NumSection> - 1]>; 3861 // break; 3862 // } 3863 // .omp.sections.exit: 3864 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3865 llvm::SwitchInst *SwitchStmt = 3866 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3867 ExitBB, CS == nullptr ? 1 : CS->size()); 3868 if (CS) { 3869 unsigned CaseNumber = 0; 3870 for (const Stmt *SubStmt : CS->children()) { 3871 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3872 CGF.EmitBlock(CaseBB); 3873 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3874 CGF.EmitStmt(SubStmt); 3875 CGF.EmitBranch(ExitBB); 3876 ++CaseNumber; 3877 } 3878 } else { 3879 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3880 CGF.EmitBlock(CaseBB); 3881 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3882 CGF.EmitStmt(CapturedStmt); 3883 CGF.EmitBranch(ExitBB); 3884 } 3885 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3886 }; 3887 3888 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3889 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3890 // Emit implicit barrier to synchronize threads and avoid data races on 3891 // initialization of firstprivate variables and post-update of lastprivate 3892 // variables. 3893 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3894 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3895 /*ForceSimpleCall=*/true); 3896 } 3897 CGF.EmitOMPPrivateClause(S, LoopScope); 3898 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3899 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3900 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3901 (void)LoopScope.Privatize(); 3902 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3903 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3904 3905 // Emit static non-chunked loop. 
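    // For illustration (hypothetical count): with three '#pragma omp section'
    // blocks, GlobalUB is 2, so the static runtime hands each thread a slice
    // of the iteration space [0, 2], and each iteration dispatches one
    // section through the switch emitted by BodyGen above.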
3906 OpenMPScheduleTy ScheduleKind; 3907 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3908 CGOpenMPRuntime::StaticRTInput StaticInit( 3909 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3910 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3911 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3912 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3913 // UB = min(UB, GlobalUB); 3914 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3915 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3916 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3917 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3918 // IV = LB; 3919 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3920 // while (idx <= UB) { BODY; ++idx; } 3921 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3922 [](CodeGenFunction &) {}); 3923 // Tell the runtime we are done. 3924 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3925 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3926 S.getDirectiveKind()); 3927 }; 3928 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3929 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3930 // Emit post-update of the reduction variables if IsLastIter != 0. 3931 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3932 return CGF.Builder.CreateIsNotNull( 3933 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3934 }); 3935 3936 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3937 if (HasLastprivates) 3938 CGF.EmitOMPLastprivateClauseFinal( 3939 S, /*NoFinals=*/false, 3940 CGF.Builder.CreateIsNotNull( 3941 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3942 }; 3943 3944 bool HasCancel = false; 3945 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3946 HasCancel = OSD->hasCancel(); 3947 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3948 HasCancel = OPSD->hasCancel(); 3949 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3950 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3951 HasCancel); 3952 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3953 // clause. Otherwise the barrier will be generated by the codegen for the 3954 // directive. 3955 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3956 // Emit implicit barrier to synchronize threads and avoid data races on 3957 // initialization of firstprivate variables. 
3958 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3959 OMPD_unknown); 3960 } 3961 } 3962 3963 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3964 if (CGM.getLangOpts().OpenMPIRBuilder) { 3965 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3966 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3967 using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy; 3968 3969 auto FiniCB = [this](InsertPointTy IP) { 3970 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3971 }; 3972 3973 const CapturedStmt *ICS = S.getInnermostCapturedStmt(); 3974 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3975 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3976 llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector; 3977 if (CS) { 3978 for (const Stmt *SubStmt : CS->children()) { 3979 auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP, 3980 InsertPointTy CodeGenIP, 3981 llvm::BasicBlock &FiniBB) { 3982 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, 3983 FiniBB); 3984 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SubStmt, CodeGenIP, 3985 FiniBB); 3986 }; 3987 SectionCBVector.push_back(SectionCB); 3988 } 3989 } else { 3990 auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP, 3991 InsertPointTy CodeGenIP, 3992 llvm::BasicBlock &FiniBB) { 3993 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3994 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CapturedStmt, CodeGenIP, 3995 FiniBB); 3996 }; 3997 SectionCBVector.push_back(SectionCB); 3998 } 3999 4000 // Privatization callback that performs appropriate action for 4001 // shared/private/firstprivate/lastprivate/copyin/... variables. 4002 // 4003 // TODO: This defaults to shared right now. 4004 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 4005 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) { 4006 // The next line is appropriate only for variables (Val) with the 4007 // data-sharing attribute "shared". 4008 ReplVal = &Val; 4009 4010 return CodeGenIP; 4011 }; 4012 4013 CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP); 4014 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 4015 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 4016 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 4017 Builder.restoreIP(OMPBuilder.createSections( 4018 Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(), 4019 S.getSingleClause<OMPNowaitClause>())); 4020 return; 4021 } 4022 { 4023 auto LPCRegion = 4024 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4025 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4026 EmitSections(S); 4027 } 4028 // Emit an implicit barrier at the end. 4029 if (!S.getSingleClause<OMPNowaitClause>()) { 4030 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 4031 OMPD_sections); 4032 } 4033 // Check for outer lastprivate conditional update. 
4034   checkForLastprivateConditionalUpdate(*this, S);
4035 }
4036 
4037 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
4038   if (CGM.getLangOpts().OpenMPIRBuilder) {
4039     llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4040     using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4041 
4042     const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
4043     auto FiniCB = [this](InsertPointTy IP) {
4044       OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4045     };
4046 
4047     auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
4048                                                    InsertPointTy CodeGenIP,
4049                                                    llvm::BasicBlock &FiniBB) {
4050       OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
4051       OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SectionRegionBodyStmt,
4052                                              CodeGenIP, FiniBB);
4053     };
4054 
4055     LexicalScope Scope(*this, S.getSourceRange());
4056     EmitStopPoint(&S);
4057     Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB));
4058 
4059     return;
4060   }
4061   LexicalScope Scope(*this, S.getSourceRange());
4062   EmitStopPoint(&S);
4063   EmitStmt(S.getAssociatedStmt());
4064 }
4065 
4066 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
4067   llvm::SmallVector<const Expr *, 8> CopyprivateVars;
4068   llvm::SmallVector<const Expr *, 8> DestExprs;
4069   llvm::SmallVector<const Expr *, 8> SrcExprs;
4070   llvm::SmallVector<const Expr *, 8> AssignmentOps;
4071   // Check if there are any 'copyprivate' clauses associated with this
4072   // 'single' construct.
4073   // Build a list of copyprivate variables along with helper expressions
4074   // (<source>, <destination>, <destination>=<source> expressions).
4075   for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
4076     CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
4077     DestExprs.append(C->destination_exprs().begin(),
4078                      C->destination_exprs().end());
4079     SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
4080     AssignmentOps.append(C->assignment_ops().begin(),
4081                          C->assignment_ops().end());
4082   }
4083   // Emit code for the 'single' region along with the 'copyprivate' clauses.
4084   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4085     Action.Enter(CGF);
4086     OMPPrivateScope SingleScope(CGF);
4087     (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
4088     CGF.EmitOMPPrivateClause(S, SingleScope);
4089     (void)SingleScope.Privatize();
4090     CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4091   };
4092   {
4093     auto LPCRegion =
4094         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4095     OMPLexicalScope Scope(*this, S, OMPD_unknown);
4096     CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
4097                                             CopyprivateVars, DestExprs,
4098                                             SrcExprs, AssignmentOps);
4099   }
4100   // Emit an implicit barrier at the end (needed to avoid a data race on
4101   // firstprivate init when neither 'nowait' nor 'copyprivate' is specified).
4102   if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
4103     CGM.getOpenMPRuntime().emitBarrierCall(
4104         *this, S.getBeginLoc(),
4105         S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
4106   }
4107   // Check for outer lastprivate conditional update.
4108 checkForLastprivateConditionalUpdate(*this, S); 4109 } 4110 4111 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 4112 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4113 Action.Enter(CGF); 4114 CGF.EmitStmt(S.getRawStmt()); 4115 }; 4116 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 4117 } 4118 4119 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 4120 if (CGM.getLangOpts().OpenMPIRBuilder) { 4121 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 4122 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 4123 4124 const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt(); 4125 4126 auto FiniCB = [this](InsertPointTy IP) { 4127 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4128 }; 4129 4130 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 4131 InsertPointTy CodeGenIP, 4132 llvm::BasicBlock &FiniBB) { 4133 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4134 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 4135 CodeGenIP, FiniBB); 4136 }; 4137 4138 LexicalScope Scope(*this, S.getSourceRange()); 4139 EmitStopPoint(&S); 4140 Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB)); 4141 4142 return; 4143 } 4144 LexicalScope Scope(*this, S.getSourceRange()); 4145 EmitStopPoint(&S); 4146 emitMaster(*this, S); 4147 } 4148 4149 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 4150 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4151 Action.Enter(CGF); 4152 CGF.EmitStmt(S.getRawStmt()); 4153 }; 4154 Expr *Filter = nullptr; 4155 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 4156 Filter = FilterClause->getThreadID(); 4157 CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(), 4158 Filter); 4159 } 4160 4161 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) { 4162 if (CGM.getLangOpts().OpenMPIRBuilder) { 4163 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 4164 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 4165 4166 const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt(); 4167 const Expr *Filter = nullptr; 4168 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 4169 Filter = FilterClause->getThreadID(); 4170 llvm::Value *FilterVal = Filter 4171 ? 
EmitScalarExpr(Filter, CGM.Int32Ty) 4172 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0); 4173 4174 auto FiniCB = [this](InsertPointTy IP) { 4175 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4176 }; 4177 4178 auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP, 4179 InsertPointTy CodeGenIP, 4180 llvm::BasicBlock &FiniBB) { 4181 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4182 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt, 4183 CodeGenIP, FiniBB); 4184 }; 4185 4186 LexicalScope Scope(*this, S.getSourceRange()); 4187 EmitStopPoint(&S); 4188 Builder.restoreIP( 4189 OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal)); 4190 4191 return; 4192 } 4193 LexicalScope Scope(*this, S.getSourceRange()); 4194 EmitStopPoint(&S); 4195 emitMasked(*this, S); 4196 } 4197 4198 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 4199 if (CGM.getLangOpts().OpenMPIRBuilder) { 4200 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 4201 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 4202 4203 const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt(); 4204 const Expr *Hint = nullptr; 4205 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 4206 Hint = HintClause->getHint(); 4207 4208 // TODO: This is slightly different from what's currently being done in 4209 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 4210 // about typing is final. 4211 llvm::Value *HintInst = nullptr; 4212 if (Hint) 4213 HintInst = 4214 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 4215 4216 auto FiniCB = [this](InsertPointTy IP) { 4217 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 4218 }; 4219 4220 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 4221 InsertPointTy CodeGenIP, 4222 llvm::BasicBlock &FiniBB) { 4223 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 4224 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 4225 CodeGenIP, FiniBB); 4226 }; 4227 4228 LexicalScope Scope(*this, S.getSourceRange()); 4229 EmitStopPoint(&S); 4230 Builder.restoreIP(OMPBuilder.createCritical( 4231 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 4232 HintInst)); 4233 4234 return; 4235 } 4236 4237 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4238 Action.Enter(CGF); 4239 CGF.EmitStmt(S.getAssociatedStmt()); 4240 }; 4241 const Expr *Hint = nullptr; 4242 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 4243 Hint = HintClause->getHint(); 4244 LexicalScope Scope(*this, S.getSourceRange()); 4245 EmitStopPoint(&S); 4246 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 4247 S.getDirectiveName().getAsString(), 4248 CodeGen, S.getBeginLoc(), Hint); 4249 } 4250 4251 void CodeGenFunction::EmitOMPParallelForDirective( 4252 const OMPParallelForDirective &S) { 4253 // Emit directive as a combined directive that consists of two implicit 4254 // directives: 'parallel' with 'for' directive. 
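  // For illustration (hypothetical source): '#pragma omp parallel for
  // reduction(inscan, +: x)' first emits the scan temp buffers below on the
  // encountering thread, then outlines the worksharing loop into the
  // 'parallel' region.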
4255 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4256 Action.Enter(CGF);
4257 (void)emitWorksharingDirective(CGF, S, S.hasCancel());
4258 };
4259 {
4260 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4261 [](const OMPReductionClause *C) {
4262 return C->getModifier() == OMPC_REDUCTION_inscan;
4263 })) {
4264 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4265 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4266 CGCapturedStmtInfo CGSI(CR_OpenMP);
4267 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4268 OMPLoopScope LoopScope(CGF, S);
4269 return CGF.EmitScalarExpr(S.getNumIterations());
4270 };
4271 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4272 }
4273 auto LPCRegion =
4274 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4275 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
4276 emitEmptyBoundParameters);
4277 }
4278 // Check for outer lastprivate conditional update.
4279 checkForLastprivateConditionalUpdate(*this, S);
4280 }
4281
4282 void CodeGenFunction::EmitOMPParallelForSimdDirective(
4283 const OMPParallelForSimdDirective &S) {
4284 // Emit directive as a combined directive that consists of two implicit
4285 // directives: 'parallel' with 'for simd' directive.
4286 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4287 Action.Enter(CGF);
4288 (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
4289 };
4290 {
4291 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4292 [](const OMPReductionClause *C) {
4293 return C->getModifier() == OMPC_REDUCTION_inscan;
4294 })) {
4295 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4296 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4297 CGCapturedStmtInfo CGSI(CR_OpenMP);
4298 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4299 OMPLoopScope LoopScope(CGF, S);
4300 return CGF.EmitScalarExpr(S.getNumIterations());
4301 };
4302 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4303 }
4304 auto LPCRegion =
4305 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4306 emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
4307 emitEmptyBoundParameters);
4308 }
4309 // Check for outer lastprivate conditional update.
4310 checkForLastprivateConditionalUpdate(*this, S);
4311 }
4312
4313 void CodeGenFunction::EmitOMPParallelMasterDirective(
4314 const OMPParallelMasterDirective &S) {
4315 // Emit directive as a combined directive that consists of two implicit
4316 // directives: 'parallel' with 'master' directive.
4317 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4318 Action.Enter(CGF);
4319 OMPPrivateScope PrivateScope(CGF);
4320 bool Copyins = CGF.EmitOMPCopyinClause(S);
4321 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4322 if (Copyins) {
4323 // Emit an implicit barrier to synchronize threads and avoid data races
4324 // when propagating the master thread's values of threadprivate variables
4325 // to the local instances of those variables in all other implicit threads.
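// For example, assuming a threadprivate variable 'tp':
//
//   int tp;
//   #pragma omp threadprivate(tp)
//   #pragma omp parallel master copyin(tp)
//   { ... }
//
// each thread's copy of 'tp' is initialized from the master thread's copy on
// entry to the parallel region, and the simple barrier emitted below keeps
// any thread from reading its copy before that propagation has finished.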
4326 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
4327 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
4328 /*ForceSimpleCall=*/true);
4329 }
4330 CGF.EmitOMPPrivateClause(S, PrivateScope);
4331 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4332 (void)PrivateScope.Privatize();
4333 emitMaster(CGF, S);
4334 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4335 };
4336 {
4337 auto LPCRegion =
4338 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4339 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
4340 emitEmptyBoundParameters);
4341 emitPostUpdateForReductionClause(*this, S,
4342 [](CodeGenFunction &) { return nullptr; });
4343 }
4344 // Check for outer lastprivate conditional update.
4345 checkForLastprivateConditionalUpdate(*this, S);
4346 }
4347
4348 void CodeGenFunction::EmitOMPParallelSectionsDirective(
4349 const OMPParallelSectionsDirective &S) {
4350 // Emit directive as a combined directive that consists of two implicit
4351 // directives: 'parallel' with 'sections' directive.
4352 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4353 Action.Enter(CGF);
4354 CGF.EmitSections(S);
4355 };
4356 {
4357 auto LPCRegion =
4358 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4359 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
4360 emitEmptyBoundParameters);
4361 }
4362 // Check for outer lastprivate conditional update.
4363 checkForLastprivateConditionalUpdate(*this, S);
4364 }
4365
4366 namespace {
4367 /// Get the list of variables declared in the context of the untied tasks.
4368 class CheckVarsEscapingUntiedTaskDeclContext final
4369 : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
4370 llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
4371
4372 public:
4373 explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
4374 virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
4375 void VisitDeclStmt(const DeclStmt *S) {
4376 if (!S)
4377 return;
4378 // Need to privatize only local vars; static locals can be processed as is.
4379 for (const Decl *D : S->decls()) {
4380 if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
4381 if (VD->hasLocalStorage())
4382 PrivateDecls.push_back(VD);
4383 }
4384 }
4385 void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
4386 void VisitCapturedStmt(const CapturedStmt *) {}
4387 void VisitLambdaExpr(const LambdaExpr *) {}
4388 void VisitBlockExpr(const BlockExpr *) {}
4389 void VisitStmt(const Stmt *S) {
4390 if (!S)
4391 return;
4392 for (const Stmt *Child : S->children())
4393 if (Child)
4394 Visit(Child);
4395 }
4396
4397 /// Returns the list of local variables that need to be privatized.
4398 ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
4399 };
4400 } // anonymous namespace
4401
4402 void CodeGenFunction::EmitOMPTaskBasedDirective(
4403 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
4404 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
4405 OMPTaskDataTy &Data) {
4406 // Emit outlined function for task construct.
4407 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
4408 auto I = CS->getCapturedDecl()->param_begin();
4409 auto PartId = std::next(I);
4410 auto TaskT = std::next(I, 4);
4411 // Check if the task is final.
4412 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
4413 // If the condition constant folds and can be elided, try to avoid emitting
4414 // the condition and the dead arm of the if/else.
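// For example, '#pragma omp task final(1)' folds to a constant and only the
// integer part of Data.Final is set, while '#pragma omp task final(n > 10)'
// emits the comparison and stores the resulting boolean in the pointer part
// of Data.Final for the runtime call to consume.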
4415 const Expr *Cond = Clause->getCondition(); 4416 bool CondConstant; 4417 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 4418 Data.Final.setInt(CondConstant); 4419 else 4420 Data.Final.setPointer(EvaluateExprAsBool(Cond)); 4421 } else { 4422 // By default the task is not final. 4423 Data.Final.setInt(/*IntVal=*/false); 4424 } 4425 // Check if the task has 'priority' clause. 4426 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 4427 const Expr *Prio = Clause->getPriority(); 4428 Data.Priority.setInt(/*IntVal=*/true); 4429 Data.Priority.setPointer(EmitScalarConversion( 4430 EmitScalarExpr(Prio), Prio->getType(), 4431 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 4432 Prio->getExprLoc())); 4433 } 4434 // The first function argument for tasks is a thread id, the second one is a 4435 // part id (0 for tied tasks, >=0 for untied task). 4436 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 4437 // Get list of private variables. 4438 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 4439 auto IRef = C->varlist_begin(); 4440 for (const Expr *IInit : C->private_copies()) { 4441 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4442 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4443 Data.PrivateVars.push_back(*IRef); 4444 Data.PrivateCopies.push_back(IInit); 4445 } 4446 ++IRef; 4447 } 4448 } 4449 EmittedAsPrivate.clear(); 4450 // Get list of firstprivate variables. 4451 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4452 auto IRef = C->varlist_begin(); 4453 auto IElemInitRef = C->inits().begin(); 4454 for (const Expr *IInit : C->private_copies()) { 4455 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4456 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4457 Data.FirstprivateVars.push_back(*IRef); 4458 Data.FirstprivateCopies.push_back(IInit); 4459 Data.FirstprivateInits.push_back(*IElemInitRef); 4460 } 4461 ++IRef; 4462 ++IElemInitRef; 4463 } 4464 } 4465 // Get list of lastprivate variables (for taskloops). 
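// Lastprivates only appear here for task-based loop constructs, e.g.:
//
//   #pragma omp taskloop lastprivate(x)
//   for (int i = 0; i < n; ++i) x = f(i);
//
// Each generated task works on a private copy of 'x', and the value from the
// logically last iteration must be copied back to the original variable; the
// map below records, for every destination of that final copy, the original
// variable it refers to.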
4466 llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 4467 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 4468 auto IRef = C->varlist_begin(); 4469 auto ID = C->destination_exprs().begin(); 4470 for (const Expr *IInit : C->private_copies()) { 4471 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4472 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4473 Data.LastprivateVars.push_back(*IRef); 4474 Data.LastprivateCopies.push_back(IInit); 4475 } 4476 LastprivateDstsOrigs.insert( 4477 std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 4478 cast<DeclRefExpr>(*IRef))); 4479 ++IRef; 4480 ++ID; 4481 } 4482 } 4483 SmallVector<const Expr *, 4> LHSs; 4484 SmallVector<const Expr *, 4> RHSs; 4485 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 4486 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4487 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4488 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4489 Data.ReductionOps.append(C->reduction_ops().begin(), 4490 C->reduction_ops().end()); 4491 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4492 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4493 } 4494 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 4495 *this, S.getBeginLoc(), LHSs, RHSs, Data); 4496 // Build list of dependences. 4497 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4498 OMPTaskDataTy::DependData &DD = 4499 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4500 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4501 } 4502 // Get list of local vars for untied tasks. 4503 if (!Data.Tied) { 4504 CheckVarsEscapingUntiedTaskDeclContext Checker; 4505 Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt()); 4506 Data.PrivateLocals.append(Checker.getPrivateDecls().begin(), 4507 Checker.getPrivateDecls().end()); 4508 } 4509 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 4510 CapturedRegion](CodeGenFunction &CGF, 4511 PrePostActionTy &Action) { 4512 llvm::MapVector<CanonicalDeclPtr<const VarDecl>, 4513 std::pair<Address, Address>> 4514 UntiedLocalVars; 4515 // Set proper addresses for generated private copies. 4516 OMPPrivateScope Scope(CGF); 4517 // Generate debug info for variables present in shared clause. 4518 if (auto *DI = CGF.getDebugInfo()) { 4519 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields = 4520 CGF.CapturedStmtInfo->getCaptureFields(); 4521 llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue(); 4522 if (CaptureFields.size() && ContextValue) { 4523 unsigned CharWidth = CGF.getContext().getCharWidth(); 4524 // The shared variables are packed together as members of structure. 4525 // So the address of each shared variable can be computed by adding 4526 // offset of it (within record) to the base address of record. For each 4527 // shared variable, debug intrinsic llvm.dbg.declare is generated with 4528 // appropriate expressions (DIExpression). 
4529 // Ex: 4530 // %12 = load %struct.anon*, %struct.anon** %__context.addr.i 4531 // call void @llvm.dbg.declare(metadata %struct.anon* %12, 4532 // metadata !svar1, 4533 // metadata !DIExpression(DW_OP_deref)) 4534 // call void @llvm.dbg.declare(metadata %struct.anon* %12, 4535 // metadata !svar2, 4536 // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref)) 4537 for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) { 4538 const VarDecl *SharedVar = It->first; 4539 RecordDecl *CaptureRecord = It->second->getParent(); 4540 const ASTRecordLayout &Layout = 4541 CGF.getContext().getASTRecordLayout(CaptureRecord); 4542 unsigned Offset = 4543 Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth; 4544 if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo()) 4545 (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue, 4546 CGF.Builder, false); 4547 llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back(); 4548 // Get the call dbg.declare instruction we just created and update 4549 // its DIExpression to add offset to base address. 4550 if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) { 4551 SmallVector<uint64_t, 8> Ops; 4552 // Add offset to the base address if non zero. 4553 if (Offset) { 4554 Ops.push_back(llvm::dwarf::DW_OP_plus_uconst); 4555 Ops.push_back(Offset); 4556 } 4557 Ops.push_back(llvm::dwarf::DW_OP_deref); 4558 auto &Ctx = DDI->getContext(); 4559 llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops); 4560 Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr)); 4561 } 4562 } 4563 } 4564 } 4565 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 4566 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 4567 !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) { 4568 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4569 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4570 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4571 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4572 CS->getCapturedDecl()->getParam(PrivatesParam))); 4573 // Map privates. 
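// The copy function (parameter CopyFnParam of the task entry) reports the
// addresses of the privatized copies living in the kmp_task_t privates
// block. Conceptually it behaves like (hypothetical pseudo-signature, for
// orientation only):
//
//   void omp_task_privates_map(void *privates, T **priv1, U **priv2, ...);
//
// so below we allocate one pointer temporary per private/firstprivate/
// lastprivate/local variable, pass the temporaries' addresses to the copy
// function, and later load each one to obtain the private copy's address.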
4574 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4575 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4576 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4577 CallArgs.push_back(PrivatesPtr); 4578 ParamTypes.push_back(PrivatesPtr->getType()); 4579 for (const Expr *E : Data.PrivateVars) { 4580 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4581 Address PrivatePtr = CGF.CreateMemTemp( 4582 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 4583 PrivatePtrs.emplace_back(VD, PrivatePtr); 4584 CallArgs.push_back(PrivatePtr.getPointer()); 4585 ParamTypes.push_back(PrivatePtr.getType()); 4586 } 4587 for (const Expr *E : Data.FirstprivateVars) { 4588 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4589 Address PrivatePtr = 4590 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4591 ".firstpriv.ptr.addr"); 4592 PrivatePtrs.emplace_back(VD, PrivatePtr); 4593 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 4594 CallArgs.push_back(PrivatePtr.getPointer()); 4595 ParamTypes.push_back(PrivatePtr.getType()); 4596 } 4597 for (const Expr *E : Data.LastprivateVars) { 4598 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4599 Address PrivatePtr = 4600 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4601 ".lastpriv.ptr.addr"); 4602 PrivatePtrs.emplace_back(VD, PrivatePtr); 4603 CallArgs.push_back(PrivatePtr.getPointer()); 4604 ParamTypes.push_back(PrivatePtr.getType()); 4605 } 4606 for (const VarDecl *VD : Data.PrivateLocals) { 4607 QualType Ty = VD->getType().getNonReferenceType(); 4608 if (VD->getType()->isLValueReferenceType()) 4609 Ty = CGF.getContext().getPointerType(Ty); 4610 if (isAllocatableDecl(VD)) 4611 Ty = CGF.getContext().getPointerType(Ty); 4612 Address PrivatePtr = CGF.CreateMemTemp( 4613 CGF.getContext().getPointerType(Ty), ".local.ptr.addr"); 4614 auto Result = UntiedLocalVars.insert( 4615 std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid()))); 4616 // If key exists update in place. 
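// Locals of an untied task must live in the task's privates block rather
// than on the stack, since an untied task may be resumed by a different
// thread after a scheduling point. Each map entry therefore keeps a pair of
// addresses: the pointer slot filled in by the copy function and, once
// loaded, the memory it designates (see the adjustment loop below).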
4617 if (!Result.second)
4618 *Result.first = std::make_pair(
4619 VD, std::make_pair(PrivatePtr, Address::invalid()));
4620 CallArgs.push_back(PrivatePtr.getPointer());
4621 ParamTypes.push_back(PrivatePtr.getType());
4622 }
4623 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
4624 ParamTypes, /*isVarArg=*/false);
4625 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4626 CopyFn, CopyFnTy->getPointerTo());
4627 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4628 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
4629 for (const auto &Pair : LastprivateDstsOrigs) {
4630 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
4631 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
4632 /*RefersToEnclosingVariableOrCapture=*/
4633 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
4634 Pair.second->getType(), VK_LValue,
4635 Pair.second->getExprLoc());
4636 Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
4637 }
4638 for (const auto &Pair : PrivatePtrs) {
4639 Address Replacement = Address(
4640 CGF.Builder.CreateLoad(Pair.second),
4641 CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4642 CGF.getContext().getDeclAlign(Pair.first));
4643 Scope.addPrivate(Pair.first, Replacement);
4644 if (auto *DI = CGF.getDebugInfo())
4645 if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
4646 (void)DI->EmitDeclareOfAutoVariable(
4647 Pair.first, Pair.second.getPointer(), CGF.Builder,
4648 /*UsePointerValue*/ true);
4649 }
4650 // Adjust mapping for internal locals by mapping actual memory instead of
4651 // a pointer to this memory.
4652 for (auto &Pair : UntiedLocalVars) {
4653 QualType VDType = Pair.first->getType().getNonReferenceType();
4654 if (isAllocatableDecl(Pair.first)) {
4655 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4656 Address Replacement(
4657 Ptr,
4658 CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
4659 CGF.getPointerAlign());
4660 Pair.second.first = Replacement;
4661 Ptr = CGF.Builder.CreateLoad(Replacement);
4662 Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
4663 CGF.getContext().getDeclAlign(Pair.first));
4664 Pair.second.second = Replacement;
4665 } else {
4666 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
4667 Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
4668 CGF.getContext().getDeclAlign(Pair.first));
4669 Pair.second.first = Replacement;
4670 }
4671 }
4672 }
4673 if (Data.Reductions) {
4674 OMPPrivateScope FirstprivateScope(CGF);
4675 for (const auto &Pair : FirstprivatePtrs) {
4676 Address Replacement(
4677 CGF.Builder.CreateLoad(Pair.second),
4678 CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
4679 CGF.getContext().getDeclAlign(Pair.first));
4680 FirstprivateScope.addPrivate(Pair.first, Replacement);
4681 }
4682 (void)FirstprivateScope.Privatize();
4683 OMPLexicalScope LexScope(CGF, S, CapturedRegion);
4684 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
4685 Data.ReductionCopies, Data.ReductionOps);
4686 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
4687 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
4688 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
4689 RedCG.emitSharedOrigLValue(CGF, Cnt);
4690 RedCG.emitAggregateType(CGF, Cnt);
4691 // FIXME: This must be removed once the runtime library is fixed.
4692 // Emit required threadprivate variables for
4693 // initializer/combiner/finalizer.
4694 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
4695 RedCG, Cnt);
4696 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
4697 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
4698 Replacement =
4699 Address(CGF.EmitScalarConversion(
4700 Replacement.getPointer(), CGF.getContext().VoidPtrTy,
4701 CGF.getContext().getPointerType(
4702 Data.ReductionCopies[Cnt]->getType()),
4703 Data.ReductionCopies[Cnt]->getExprLoc()),
4704 CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
4705 Replacement.getAlignment());
4706 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
4707 Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
4708 }
4709 }
4710 // Privatize all private variables except for in_reduction items.
4711 (void)Scope.Privatize();
4712 SmallVector<const Expr *, 4> InRedVars;
4713 SmallVector<const Expr *, 4> InRedPrivs;
4714 SmallVector<const Expr *, 4> InRedOps;
4715 SmallVector<const Expr *, 4> TaskgroupDescriptors;
4716 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
4717 auto IPriv = C->privates().begin();
4718 auto IRed = C->reduction_ops().begin();
4719 auto ITD = C->taskgroup_descriptors().begin();
4720 for (const Expr *Ref : C->varlists()) {
4721 InRedVars.emplace_back(Ref);
4722 InRedPrivs.emplace_back(*IPriv);
4723 InRedOps.emplace_back(*IRed);
4724 TaskgroupDescriptors.emplace_back(*ITD);
4725 std::advance(IPriv, 1);
4726 std::advance(IRed, 1);
4727 std::advance(ITD, 1);
4728 }
4729 }
4730 // Privatize in_reduction items here, because taskgroup descriptors must be
4731 // privatized earlier.
4732 OMPPrivateScope InRedScope(CGF);
4733 if (!InRedVars.empty()) {
4734 ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
4735 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
4736 RedCG.emitSharedOrigLValue(CGF, Cnt);
4737 RedCG.emitAggregateType(CGF, Cnt);
4738 // The taskgroup descriptor variable is always implicitly firstprivate
4739 // and has already been privatized during processing of the firstprivates.
4740 // FIXME: This must be removed once the runtime library is fixed.
4741 // Emit required threadprivate variables for
4742 // initializer/combiner/finalizer.
4743 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 4744 RedCG, Cnt); 4745 llvm::Value *ReductionsPtr; 4746 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) { 4747 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), 4748 TRExpr->getExprLoc()); 4749 } else { 4750 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); 4751 } 4752 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 4753 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 4754 Replacement = Address( 4755 CGF.EmitScalarConversion( 4756 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 4757 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 4758 InRedPrivs[Cnt]->getExprLoc()), 4759 CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()), 4760 Replacement.getAlignment()); 4761 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 4762 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement); 4763 } 4764 } 4765 (void)InRedScope.Privatize(); 4766 4767 CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF, 4768 UntiedLocalVars); 4769 Action.Enter(CGF); 4770 BodyGen(CGF); 4771 }; 4772 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4773 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 4774 Data.NumberOfParts); 4775 OMPLexicalScope Scope(*this, S, llvm::None, 4776 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4777 !isOpenMPSimdDirective(S.getDirectiveKind())); 4778 TaskGen(*this, OutlinedFn, Data); 4779 } 4780 4781 static ImplicitParamDecl * 4782 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 4783 QualType Ty, CapturedDecl *CD, 4784 SourceLocation Loc) { 4785 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 4786 ImplicitParamDecl::Other); 4787 auto *OrigRef = DeclRefExpr::Create( 4788 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 4789 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 4790 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 4791 ImplicitParamDecl::Other); 4792 auto *PrivateRef = DeclRefExpr::Create( 4793 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 4794 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 4795 QualType ElemType = C.getBaseElementType(Ty); 4796 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 4797 ImplicitParamDecl::Other); 4798 auto *InitRef = DeclRefExpr::Create( 4799 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 4800 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 4801 PrivateVD->setInitStyle(VarDecl::CInit); 4802 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 4803 InitRef, /*BasePath=*/nullptr, 4804 VK_PRValue, FPOptionsOverride())); 4805 Data.FirstprivateVars.emplace_back(OrigRef); 4806 Data.FirstprivateCopies.emplace_back(PrivateRef); 4807 Data.FirstprivateInits.emplace_back(InitRef); 4808 return OrigVD; 4809 } 4810 4811 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 4812 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 4813 OMPTargetDataInfo &InputInfo) { 4814 // Emit outlined function for task construct. 
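// Used for 'target'-based directives that must be wrapped in an implicit
// task (e.g. '#pragma omp target nowait depend(inout: a)'). The offloading
// base-pointer/pointer/size arrays (and the mapper array, when present)
// computed by the caller are handed to the task as implicit firstprivates
// created by createImplicitFirstprivateForType above, so the deferred device
// call still sees them after the enclosing function has moved on. This is a
// summary of the surrounding codegen, not an exact transformation.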
4815 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4816 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4817 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4818 auto I = CS->getCapturedDecl()->param_begin(); 4819 auto PartId = std::next(I); 4820 auto TaskT = std::next(I, 4); 4821 OMPTaskDataTy Data; 4822 // The task is not final. 4823 Data.Final.setInt(/*IntVal=*/false); 4824 // Get list of firstprivate variables. 4825 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4826 auto IRef = C->varlist_begin(); 4827 auto IElemInitRef = C->inits().begin(); 4828 for (auto *IInit : C->private_copies()) { 4829 Data.FirstprivateVars.push_back(*IRef); 4830 Data.FirstprivateCopies.push_back(IInit); 4831 Data.FirstprivateInits.push_back(*IElemInitRef); 4832 ++IRef; 4833 ++IElemInitRef; 4834 } 4835 } 4836 OMPPrivateScope TargetScope(*this); 4837 VarDecl *BPVD = nullptr; 4838 VarDecl *PVD = nullptr; 4839 VarDecl *SVD = nullptr; 4840 VarDecl *MVD = nullptr; 4841 if (InputInfo.NumberOfTargetItems > 0) { 4842 auto *CD = CapturedDecl::Create( 4843 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4844 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4845 QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType( 4846 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4847 /*IndexTypeQuals=*/0); 4848 BPVD = createImplicitFirstprivateForType( 4849 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4850 PVD = createImplicitFirstprivateForType( 4851 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4852 QualType SizesType = getContext().getConstantArrayType( 4853 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4854 ArrSize, nullptr, ArrayType::Normal, 4855 /*IndexTypeQuals=*/0); 4856 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4857 S.getBeginLoc()); 4858 TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray); 4859 TargetScope.addPrivate(PVD, InputInfo.PointersArray); 4860 TargetScope.addPrivate(SVD, InputInfo.SizesArray); 4861 // If there is no user-defined mapper, the mapper array will be nullptr. In 4862 // this case, we don't need to privatize it. 4863 if (!isa_and_nonnull<llvm::ConstantPointerNull>( 4864 InputInfo.MappersArray.getPointer())) { 4865 MVD = createImplicitFirstprivateForType( 4866 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc()); 4867 TargetScope.addPrivate(MVD, InputInfo.MappersArray); 4868 } 4869 } 4870 (void)TargetScope.Privatize(); 4871 // Build list of dependences. 4872 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4873 OMPTaskDataTy::DependData &DD = 4874 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4875 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4876 } 4877 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD, 4878 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4879 // Set proper addresses for generated private copies. 4880 OMPPrivateScope Scope(CGF); 4881 if (!Data.FirstprivateVars.empty()) { 4882 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4883 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4884 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4885 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4886 CS->getCapturedDecl()->getParam(PrivatesParam))); 4887 // Map privates. 
4888 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4889 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4890 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4891 CallArgs.push_back(PrivatesPtr); 4892 ParamTypes.push_back(PrivatesPtr->getType()); 4893 for (const Expr *E : Data.FirstprivateVars) { 4894 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4895 Address PrivatePtr = 4896 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4897 ".firstpriv.ptr.addr"); 4898 PrivatePtrs.emplace_back(VD, PrivatePtr); 4899 CallArgs.push_back(PrivatePtr.getPointer()); 4900 ParamTypes.push_back(PrivatePtr.getType()); 4901 } 4902 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4903 ParamTypes, /*isVarArg=*/false); 4904 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4905 CopyFn, CopyFnTy->getPointerTo()); 4906 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4907 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4908 for (const auto &Pair : PrivatePtrs) { 4909 Address Replacement( 4910 CGF.Builder.CreateLoad(Pair.second), 4911 CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()), 4912 CGF.getContext().getDeclAlign(Pair.first)); 4913 Scope.addPrivate(Pair.first, Replacement); 4914 } 4915 } 4916 // Privatize all private variables except for in_reduction items. 4917 (void)Scope.Privatize(); 4918 if (InputInfo.NumberOfTargetItems > 0) { 4919 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4920 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4921 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4922 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4923 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4924 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4925 // If MVD is nullptr, the mapper array is not privatized 4926 if (MVD) 4927 InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP( 4928 CGF.GetAddrOfLocalVar(MVD), /*Index=*/0); 4929 } 4930 4931 Action.Enter(CGF); 4932 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4933 BodyGen(CGF); 4934 }; 4935 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4936 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4937 Data.NumberOfParts); 4938 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4939 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4940 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4941 SourceLocation()); 4942 4943 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4944 SharedsTy, CapturedStruct, &IfCond, Data); 4945 } 4946 4947 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4948 // Emit outlined function for task construct. 4949 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4950 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4951 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4952 const Expr *IfCond = nullptr; 4953 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4954 if (C->getNameModifier() == OMPD_unknown || 4955 C->getNameModifier() == OMPD_task) { 4956 IfCond = C->getCondition(); 4957 break; 4958 } 4959 } 4960 4961 OMPTaskDataTy Data; 4962 // Check if we should emit tied or untied task. 
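// A task is tied unless '#pragma omp task untied' is written. For tied tasks
// the part id passed to the task entry stays 0; untied tasks use it to
// resume at the right part after the task has been suspended and
// rescheduled.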
4963 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4964 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4965 CGF.EmitStmt(CS->getCapturedStmt()); 4966 }; 4967 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4968 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4969 const OMPTaskDataTy &Data) { 4970 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4971 SharedsTy, CapturedStruct, IfCond, 4972 Data); 4973 }; 4974 auto LPCRegion = 4975 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4976 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4977 } 4978 4979 void CodeGenFunction::EmitOMPTaskyieldDirective( 4980 const OMPTaskyieldDirective &S) { 4981 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4982 } 4983 4984 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4985 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4986 } 4987 4988 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4989 OMPTaskDataTy Data; 4990 // Build list of dependences 4991 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4992 OMPTaskDataTy::DependData &DD = 4993 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4994 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4995 } 4996 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data); 4997 } 4998 4999 void CodeGenFunction::EmitOMPTaskgroupDirective( 5000 const OMPTaskgroupDirective &S) { 5001 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5002 Action.Enter(CGF); 5003 if (const Expr *E = S.getReductionRef()) { 5004 SmallVector<const Expr *, 4> LHSs; 5005 SmallVector<const Expr *, 4> RHSs; 5006 OMPTaskDataTy Data; 5007 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 5008 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 5009 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 5010 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 5011 Data.ReductionOps.append(C->reduction_ops().begin(), 5012 C->reduction_ops().end()); 5013 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 5014 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 5015 } 5016 llvm::Value *ReductionDesc = 5017 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 5018 LHSs, RHSs, Data); 5019 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 5020 CGF.EmitVarDecl(*VD); 5021 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 5022 /*Volatile=*/false, E->getType()); 5023 } 5024 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 5025 }; 5026 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5027 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 5028 } 5029 5030 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 5031 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 5032 ? 
llvm::AtomicOrdering::NotAtomic 5033 : llvm::AtomicOrdering::AcquireRelease; 5034 CGM.getOpenMPRuntime().emitFlush( 5035 *this, 5036 [&S]() -> ArrayRef<const Expr *> { 5037 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 5038 return llvm::makeArrayRef(FlushClause->varlist_begin(), 5039 FlushClause->varlist_end()); 5040 return llvm::None; 5041 }(), 5042 S.getBeginLoc(), AO); 5043 } 5044 5045 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 5046 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 5047 LValue DOLVal = EmitLValue(DO->getDepobj()); 5048 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 5049 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 5050 DC->getModifier()); 5051 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 5052 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 5053 *this, Dependencies, DC->getBeginLoc()); 5054 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 5055 return; 5056 } 5057 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 5058 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 5059 return; 5060 } 5061 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 5062 CGM.getOpenMPRuntime().emitUpdateClause( 5063 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 5064 return; 5065 } 5066 } 5067 5068 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 5069 if (!OMPParentLoopDirectiveForScan) 5070 return; 5071 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 5072 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 5073 SmallVector<const Expr *, 4> Shareds; 5074 SmallVector<const Expr *, 4> Privates; 5075 SmallVector<const Expr *, 4> LHSs; 5076 SmallVector<const Expr *, 4> RHSs; 5077 SmallVector<const Expr *, 4> ReductionOps; 5078 SmallVector<const Expr *, 4> CopyOps; 5079 SmallVector<const Expr *, 4> CopyArrayTemps; 5080 SmallVector<const Expr *, 4> CopyArrayElems; 5081 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 5082 if (C->getModifier() != OMPC_REDUCTION_inscan) 5083 continue; 5084 Shareds.append(C->varlist_begin(), C->varlist_end()); 5085 Privates.append(C->privates().begin(), C->privates().end()); 5086 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 5087 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 5088 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 5089 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 5090 CopyArrayTemps.append(C->copy_array_temps().begin(), 5091 C->copy_array_temps().end()); 5092 CopyArrayElems.append(C->copy_array_elems().begin(), 5093 C->copy_array_elems().end()); 5094 } 5095 if (ParentDir.getDirectiveKind() == OMPD_simd || 5096 (getLangOpts().OpenMPSimd && 5097 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 5098 // For simd directive and simd-based directives in simd only mode, use the 5099 // following codegen: 5100 // int x = 0; 5101 // #pragma omp simd reduction(inscan, +: x) 5102 // for (..) { 5103 // <first part> 5104 // #pragma omp scan inclusive(x) 5105 // <second part> 5106 // } 5107 // is transformed to: 5108 // int x = 0; 5109 // for (..) { 5110 // int x_priv = 0; 5111 // <first part> 5112 // x = x_priv + x; 5113 // x_priv = x; 5114 // <second part> 5115 // } 5116 // and 5117 // int x = 0; 5118 // #pragma omp simd reduction(inscan, +: x) 5119 // for (..) 
{ 5120 // <first part> 5121 // #pragma omp scan exclusive(x) 5122 // <second part> 5123 // } 5124 // to 5125 // int x = 0; 5126 // for (..) { 5127 // int x_priv = 0; 5128 // <second part> 5129 // int temp = x; 5130 // x = x_priv + x; 5131 // x_priv = temp; 5132 // <first part> 5133 // } 5134 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 5135 EmitBranch(IsInclusive 5136 ? OMPScanReduce 5137 : BreakContinueStack.back().ContinueBlock.getBlock()); 5138 EmitBlock(OMPScanDispatch); 5139 { 5140 // New scope for correct construction/destruction of temp variables for 5141 // exclusive scan. 5142 LexicalScope Scope(*this, S.getSourceRange()); 5143 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 5144 EmitBlock(OMPScanReduce); 5145 if (!IsInclusive) { 5146 // Create temp var and copy LHS value to this temp value. 5147 // TMP = LHS; 5148 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 5149 const Expr *PrivateExpr = Privates[I]; 5150 const Expr *TempExpr = CopyArrayTemps[I]; 5151 EmitAutoVarDecl( 5152 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 5153 LValue DestLVal = EmitLValue(TempExpr); 5154 LValue SrcLVal = EmitLValue(LHSs[I]); 5155 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 5156 SrcLVal.getAddress(*this), 5157 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 5158 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 5159 CopyOps[I]); 5160 } 5161 } 5162 CGM.getOpenMPRuntime().emitReduction( 5163 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 5164 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 5165 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 5166 const Expr *PrivateExpr = Privates[I]; 5167 LValue DestLVal; 5168 LValue SrcLVal; 5169 if (IsInclusive) { 5170 DestLVal = EmitLValue(RHSs[I]); 5171 SrcLVal = EmitLValue(LHSs[I]); 5172 } else { 5173 const Expr *TempExpr = CopyArrayTemps[I]; 5174 DestLVal = EmitLValue(RHSs[I]); 5175 SrcLVal = EmitLValue(TempExpr); 5176 } 5177 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 5178 SrcLVal.getAddress(*this), 5179 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 5180 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 5181 CopyOps[I]); 5182 } 5183 } 5184 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 5185 OMPScanExitBlock = IsInclusive 5186 ? BreakContinueStack.back().ContinueBlock.getBlock() 5187 : OMPScanReduce; 5188 EmitBlock(OMPAfterScanBlock); 5189 return; 5190 } 5191 if (!IsInclusive) { 5192 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 5193 EmitBlock(OMPScanExitBlock); 5194 } 5195 if (OMPFirstScanLoop) { 5196 // Emit buffer[i] = red; at the end of the input phase. 
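// In the non-simd case the parent loop is emitted twice, roughly as:
//
//   // first pass (OMPFirstScanLoop): <input phase>; buffer[i] = red;
//   // reduction over the buffer between the two passes
//   // second pass: red = buffer[i - (exclusive ? 1 : 0)]; <scan phase>
//
// The code below emits the 'buffer[i] = red' copy for the first pass; the
// matching 'red = buffer[i]' load for the second pass is emitted further
// down. (Rough sketch; the buffer handling lives in the scan-based directive
// helpers.)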
5197 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 5198 .getIterationVariable() 5199 ->IgnoreParenImpCasts(); 5200 LValue IdxLVal = EmitLValue(IVExpr); 5201 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 5202 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 5203 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 5204 const Expr *PrivateExpr = Privates[I]; 5205 const Expr *OrigExpr = Shareds[I]; 5206 const Expr *CopyArrayElem = CopyArrayElems[I]; 5207 OpaqueValueMapping IdxMapping( 5208 *this, 5209 cast<OpaqueValueExpr>( 5210 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 5211 RValue::get(IdxVal)); 5212 LValue DestLVal = EmitLValue(CopyArrayElem); 5213 LValue SrcLVal = EmitLValue(OrigExpr); 5214 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 5215 SrcLVal.getAddress(*this), 5216 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 5217 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 5218 CopyOps[I]); 5219 } 5220 } 5221 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 5222 if (IsInclusive) { 5223 EmitBlock(OMPScanExitBlock); 5224 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 5225 } 5226 EmitBlock(OMPScanDispatch); 5227 if (!OMPFirstScanLoop) { 5228 // Emit red = buffer[i]; at the entrance to the scan phase. 5229 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 5230 .getIterationVariable() 5231 ->IgnoreParenImpCasts(); 5232 LValue IdxLVal = EmitLValue(IVExpr); 5233 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 5234 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 5235 llvm::BasicBlock *ExclusiveExitBB = nullptr; 5236 if (!IsInclusive) { 5237 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 5238 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 5239 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 5240 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 5241 EmitBlock(ContBB); 5242 // Use idx - 1 iteration for exclusive scan. 5243 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 5244 } 5245 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 5246 const Expr *PrivateExpr = Privates[I]; 5247 const Expr *OrigExpr = Shareds[I]; 5248 const Expr *CopyArrayElem = CopyArrayElems[I]; 5249 OpaqueValueMapping IdxMapping( 5250 *this, 5251 cast<OpaqueValueExpr>( 5252 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 5253 RValue::get(IdxVal)); 5254 LValue SrcLVal = EmitLValue(CopyArrayElem); 5255 LValue DestLVal = EmitLValue(OrigExpr); 5256 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 5257 SrcLVal.getAddress(*this), 5258 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 5259 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 5260 CopyOps[I]); 5261 } 5262 if (!IsInclusive) { 5263 EmitBlock(ExclusiveExitBB); 5264 } 5265 } 5266 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 5267 : OMPAfterScanBlock); 5268 EmitBlock(OMPAfterScanBlock); 5269 } 5270 5271 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 5272 const CodeGenLoopTy &CodeGenLoop, 5273 Expr *IncExpr) { 5274 // Emit the loop iteration variable. 5275 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 5276 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 5277 EmitVarDecl(*IVDecl); 5278 5279 // Emit the iterations count variable. 
5280 // If it is not a variable, Sema decided to calculate iterations count on each 5281 // iteration (e.g., it is foldable into a constant). 5282 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 5283 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 5284 // Emit calculation of the iterations count. 5285 EmitIgnoredExpr(S.getCalcLastIteration()); 5286 } 5287 5288 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 5289 5290 bool HasLastprivateClause = false; 5291 // Check pre-condition. 5292 { 5293 OMPLoopScope PreInitScope(*this, S); 5294 // Skip the entire loop if we don't meet the precondition. 5295 // If the condition constant folds and can be elided, avoid emitting the 5296 // whole loop. 5297 bool CondConstant; 5298 llvm::BasicBlock *ContBlock = nullptr; 5299 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 5300 if (!CondConstant) 5301 return; 5302 } else { 5303 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 5304 ContBlock = createBasicBlock("omp.precond.end"); 5305 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 5306 getProfileCount(&S)); 5307 EmitBlock(ThenBlock); 5308 incrementProfileCounter(&S); 5309 } 5310 5311 emitAlignedClause(*this, S); 5312 // Emit 'then' code. 5313 { 5314 // Emit helper vars inits. 5315 5316 LValue LB = EmitOMPHelperVar( 5317 *this, cast<DeclRefExpr>( 5318 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5319 ? S.getCombinedLowerBoundVariable() 5320 : S.getLowerBoundVariable()))); 5321 LValue UB = EmitOMPHelperVar( 5322 *this, cast<DeclRefExpr>( 5323 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5324 ? S.getCombinedUpperBoundVariable() 5325 : S.getUpperBoundVariable()))); 5326 LValue ST = 5327 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 5328 LValue IL = 5329 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 5330 5331 OMPPrivateScope LoopScope(*this); 5332 if (EmitOMPFirstprivateClause(S, LoopScope)) { 5333 // Emit implicit barrier to synchronize threads and avoid data races 5334 // on initialization of firstprivate variables and post-update of 5335 // lastprivate variables. 5336 CGM.getOpenMPRuntime().emitBarrierCall( 5337 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 5338 /*ForceSimpleCall=*/true); 5339 } 5340 EmitOMPPrivateClause(S, LoopScope); 5341 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5342 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5343 !isOpenMPTeamsDirective(S.getDirectiveKind())) 5344 EmitOMPReductionClauseInit(S, LoopScope); 5345 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 5346 EmitOMPPrivateLoopCounters(S, LoopScope); 5347 (void)LoopScope.Privatize(); 5348 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5349 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 5350 5351 // Detect the distribute schedule kind and chunk. 5352 llvm::Value *Chunk = nullptr; 5353 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 5354 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 5355 ScheduleKind = C->getDistScheduleKind(); 5356 if (const Expr *Ch = C->getChunkSize()) { 5357 Chunk = EmitScalarExpr(Ch); 5358 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 5359 S.getIterationVariable()->getType(), 5360 S.getBeginLoc()); 5361 } 5362 } else { 5363 // Default behaviour for dist_schedule clause. 
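// E.g. '#pragma omp distribute dist_schedule(static, 128)' yields
// ScheduleKind == OMPC_DIST_SCHEDULE_static with Chunk == 128, converted
// above to the iteration variable's type. Without the clause, the runtime
// class picks the default (possibly target-specific) schedule below.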
5364 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 5365 *this, S, ScheduleKind, Chunk); 5366 } 5367 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 5368 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 5369 5370 // OpenMP [2.10.8, distribute Construct, Description] 5371 // If dist_schedule is specified, kind must be static. If specified, 5372 // iterations are divided into chunks of size chunk_size, chunks are 5373 // assigned to the teams of the league in a round-robin fashion in the 5374 // order of the team number. When no chunk_size is specified, the 5375 // iteration space is divided into chunks that are approximately equal 5376 // in size, and at most one chunk is distributed to each team of the 5377 // league. The size of the chunks is unspecified in this case. 5378 bool StaticChunked = 5379 RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) && 5380 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 5381 if (RT.isStaticNonchunked(ScheduleKind, 5382 /* Chunked */ Chunk != nullptr) || 5383 StaticChunked) { 5384 CGOpenMPRuntime::StaticRTInput StaticInit( 5385 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 5386 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5387 StaticChunked ? Chunk : nullptr); 5388 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 5389 StaticInit); 5390 JumpDest LoopExit = 5391 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 5392 // UB = min(UB, GlobalUB); 5393 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5394 ? S.getCombinedEnsureUpperBound() 5395 : S.getEnsureUpperBound()); 5396 // IV = LB; 5397 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5398 ? S.getCombinedInit() 5399 : S.getInit()); 5400 5401 const Expr *Cond = 5402 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 5403 ? S.getCombinedCond() 5404 : S.getCond(); 5405 5406 if (StaticChunked) 5407 Cond = S.getCombinedDistCond(); 5408 5409 // For static unchunked schedules generate: 5410 // 5411 // 1. For distribute alone, codegen 5412 // while (idx <= UB) { 5413 // BODY; 5414 // ++idx; 5415 // } 5416 // 5417 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 5418 // while (idx <= UB) { 5419 // <CodeGen rest of pragma>(LB, UB); 5420 // idx += ST; 5421 // } 5422 // 5423 // For static chunk one schedule generate: 5424 // 5425 // while (IV <= GlobalUB) { 5426 // <CodeGen rest of pragma>(LB, UB); 5427 // LB += ST; 5428 // UB += ST; 5429 // UB = min(UB, GlobalUB); 5430 // IV = LB; 5431 // } 5432 // 5433 emitCommonSimdLoop( 5434 *this, S, 5435 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5436 if (isOpenMPSimdDirective(S.getDirectiveKind())) 5437 CGF.EmitOMPSimdInit(S); 5438 }, 5439 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 5440 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 5441 CGF.EmitOMPInnerLoop( 5442 S, LoopScope.requiresCleanups(), Cond, IncExpr, 5443 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 5444 CodeGenLoop(CGF, S, LoopExit); 5445 }, 5446 [&S, StaticChunked](CodeGenFunction &CGF) { 5447 if (StaticChunked) { 5448 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 5449 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 5450 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 5451 CGF.EmitIgnoredExpr(S.getCombinedInit()); 5452 } 5453 }); 5454 }); 5455 EmitBlock(LoopExit.getBlock()); 5456 // Tell the runtime we are done. 
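// With the libomp runtime this init/fini pair corresponds roughly to
// __kmpc_dist_for_static_init_<4|8[u]> / __kmpc_for_static_fini calls
// bracketing the distributed loop; the exact entry points are chosen by
// CGOpenMPRuntime.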
5457 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 5458 } else { 5459 // Emit the outer loop, which requests its work chunk [LB..UB] from 5460 // runtime and runs the inner loop to process it. 5461 const OMPLoopArguments LoopArguments = { 5462 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 5463 IL.getAddress(*this), Chunk}; 5464 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 5465 CodeGenLoop); 5466 } 5467 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 5468 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 5469 return CGF.Builder.CreateIsNotNull( 5470 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5471 }); 5472 } 5473 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 5474 !isOpenMPParallelDirective(S.getDirectiveKind()) && 5475 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 5476 EmitOMPReductionClauseFinal(S, OMPD_simd); 5477 // Emit post-update of the reduction variables if IsLastIter != 0. 5478 emitPostUpdateForReductionClause( 5479 *this, S, [IL, &S](CodeGenFunction &CGF) { 5480 return CGF.Builder.CreateIsNotNull( 5481 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 5482 }); 5483 } 5484 // Emit final copy of the lastprivate variables if IsLastIter != 0. 5485 if (HasLastprivateClause) { 5486 EmitOMPLastprivateClauseFinal( 5487 S, /*NoFinals=*/false, 5488 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 5489 } 5490 } 5491 5492 // We're now done with the loop, so jump to the continuation block. 5493 if (ContBlock) { 5494 EmitBranch(ContBlock); 5495 EmitBlock(ContBlock, true); 5496 } 5497 } 5498 } 5499 5500 void CodeGenFunction::EmitOMPDistributeDirective( 5501 const OMPDistributeDirective &S) { 5502 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5503 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5504 }; 5505 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5506 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 5507 } 5508 5509 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 5510 const CapturedStmt *S, 5511 SourceLocation Loc) { 5512 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 5513 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 5514 CGF.CapturedStmtInfo = &CapStmtInfo; 5515 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 5516 Fn->setDoesNotRecurse(); 5517 return Fn; 5518 } 5519 5520 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 5521 if (CGM.getLangOpts().OpenMPIRBuilder) { 5522 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 5523 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 5524 5525 if (S.hasClausesOfKind<OMPDependClause>()) { 5526 // The ordered directive with depend clause. 
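// This is the doacross form, e.g. within an 'ordered(2)' loop nest:
//
//   #pragma omp ordered depend(sink: i - 1, j)
//   ...
//   #pragma omp ordered depend(source)
//
// depend(sink: ...) waits for the named iteration vector and depend(source)
// posts the current one (with libomp: __kmpc_doacross_wait and
// __kmpc_doacross_post). The loop below flattens the iteration values and
// lets the OMPIRBuilder emit those runtime calls.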
5527 assert(!S.hasAssociatedStmt() && 5528 "No associated statement must be in ordered depend construct."); 5529 InsertPointTy AllocaIP(AllocaInsertPt->getParent(), 5530 AllocaInsertPt->getIterator()); 5531 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) { 5532 unsigned NumLoops = DC->getNumLoops(); 5533 QualType Int64Ty = CGM.getContext().getIntTypeForBitwidth( 5534 /*DestWidth=*/64, /*Signed=*/1); 5535 llvm::SmallVector<llvm::Value *> StoreValues; 5536 for (unsigned I = 0; I < NumLoops; I++) { 5537 const Expr *CounterVal = DC->getLoopData(I); 5538 assert(CounterVal); 5539 llvm::Value *StoreValue = EmitScalarConversion( 5540 EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty, 5541 CounterVal->getExprLoc()); 5542 StoreValues.emplace_back(StoreValue); 5543 } 5544 bool IsDependSource = false; 5545 if (DC->getDependencyKind() == OMPC_DEPEND_source) 5546 IsDependSource = true; 5547 Builder.restoreIP(OMPBuilder.createOrderedDepend( 5548 Builder, AllocaIP, NumLoops, StoreValues, ".cnt.addr", 5549 IsDependSource)); 5550 } 5551 } else { 5552 // The ordered directive with threads or simd clause, or without clause. 5553 // Without clause, it behaves as if the threads clause is specified. 5554 const auto *C = S.getSingleClause<OMPSIMDClause>(); 5555 5556 auto FiniCB = [this](InsertPointTy IP) { 5557 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 5558 }; 5559 5560 auto BodyGenCB = [&S, C, this](InsertPointTy AllocaIP, 5561 InsertPointTy CodeGenIP, 5562 llvm::BasicBlock &FiniBB) { 5563 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 5564 if (C) { 5565 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5566 GenerateOpenMPCapturedVars(*CS, CapturedVars); 5567 llvm::Function *OutlinedFn = 5568 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 5569 assert(S.getBeginLoc().isValid() && 5570 "Outlined function call location must be valid."); 5571 ApplyDebugLocation::CreateDefaultArtificial(*this, S.getBeginLoc()); 5572 OMPBuilderCBHelpers::EmitCaptureStmt(*this, CodeGenIP, FiniBB, 5573 OutlinedFn, CapturedVars); 5574 } else { 5575 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, 5576 FiniBB); 5577 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CS->getCapturedStmt(), 5578 CodeGenIP, FiniBB); 5579 } 5580 }; 5581 5582 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5583 Builder.restoreIP( 5584 OMPBuilder.createOrderedThreadsSimd(Builder, BodyGenCB, FiniCB, !C)); 5585 } 5586 return; 5587 } 5588 5589 if (S.hasClausesOfKind<OMPDependClause>()) { 5590 assert(!S.hasAssociatedStmt() && 5591 "No associated statement must be in ordered depend construct."); 5592 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 5593 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 5594 return; 5595 } 5596 const auto *C = S.getSingleClause<OMPSIMDClause>(); 5597 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 5598 PrePostActionTy &Action) { 5599 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 5600 if (C) { 5601 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5602 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5603 llvm::Function *OutlinedFn = 5604 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 5605 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 5606 OutlinedFn, CapturedVars); 5607 } else { 5608 Action.Enter(CGF); 5609 CGF.EmitStmt(CS->getCapturedStmt()); 5610 } 5611 }; 5612 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5613 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C); 5614 } 5615 5616 
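// The helpers below normalize values read or written by 'omp atomic' to the
// evaluation kind of the destination l-value. For example, in
//
//   double d; int i;
//   #pragma omp atomic read
//   d = i;
//
// the value loaded from 'i' is converted to double via convertToScalarValue,
// while a _Complex destination goes through convertToComplexValue instead.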
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 5617 QualType SrcType, QualType DestType, 5618 SourceLocation Loc) { 5619 assert(CGF.hasScalarEvaluationKind(DestType) && 5620 "DestType must have scalar evaluation kind."); 5621 assert(!Val.isAggregate() && "Must be a scalar or complex."); 5622 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 5623 DestType, Loc) 5624 : CGF.EmitComplexToScalarConversion( 5625 Val.getComplexVal(), SrcType, DestType, Loc); 5626 } 5627 5628 static CodeGenFunction::ComplexPairTy 5629 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 5630 QualType DestType, SourceLocation Loc) { 5631 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 5632 "DestType must have complex evaluation kind."); 5633 CodeGenFunction::ComplexPairTy ComplexVal; 5634 if (Val.isScalar()) { 5635 // Convert the input element to the element type of the complex. 5636 QualType DestElementType = 5637 DestType->castAs<ComplexType>()->getElementType(); 5638 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 5639 Val.getScalarVal(), SrcType, DestElementType, Loc); 5640 ComplexVal = CodeGenFunction::ComplexPairTy( 5641 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 5642 } else { 5643 assert(Val.isComplex() && "Must be a scalar or complex."); 5644 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 5645 QualType DestElementType = 5646 DestType->castAs<ComplexType>()->getElementType(); 5647 ComplexVal.first = CGF.EmitScalarConversion( 5648 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 5649 ComplexVal.second = CGF.EmitScalarConversion( 5650 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 5651 } 5652 return ComplexVal; 5653 } 5654 5655 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5656 LValue LVal, RValue RVal) { 5657 if (LVal.isGlobalReg()) 5658 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 5659 else 5660 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 5661 } 5662 5663 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 5664 llvm::AtomicOrdering AO, LValue LVal, 5665 SourceLocation Loc) { 5666 if (LVal.isGlobalReg()) 5667 return CGF.EmitLoadOfLValue(LVal, Loc); 5668 return CGF.EmitAtomicLoad( 5669 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 5670 LVal.isVolatile()); 5671 } 5672 5673 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 5674 QualType RValTy, SourceLocation Loc) { 5675 switch (getEvaluationKind(LVal.getType())) { 5676 case TEK_Scalar: 5677 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 5678 *this, RVal, RValTy, LVal.getType(), Loc)), 5679 LVal); 5680 break; 5681 case TEK_Complex: 5682 EmitStoreOfComplex( 5683 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 5684 /*isInit=*/false); 5685 break; 5686 case TEK_Aggregate: 5687 llvm_unreachable("Must be a scalar or complex."); 5688 } 5689 } 5690 5691 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 5692 const Expr *X, const Expr *V, 5693 SourceLocation Loc) { 5694 // v = x; 5695 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 5696 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 5697 LValue XLValue = CGF.EmitLValue(X); 5698 LValue VLValue = CGF.EmitLValue(V); 5699 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 5700 // OpenMP, 2.17.7, atomic Construct 5701 // If the read or 
capture clause is specified and the acquire, acq_rel, or 5702 // seq_cst clause is specified then the strong flush on exit from the atomic 5703 // operation is also an acquire flush. 5704 switch (AO) { 5705 case llvm::AtomicOrdering::Acquire: 5706 case llvm::AtomicOrdering::AcquireRelease: 5707 case llvm::AtomicOrdering::SequentiallyConsistent: 5708 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5709 llvm::AtomicOrdering::Acquire); 5710 break; 5711 case llvm::AtomicOrdering::Monotonic: 5712 case llvm::AtomicOrdering::Release: 5713 break; 5714 case llvm::AtomicOrdering::NotAtomic: 5715 case llvm::AtomicOrdering::Unordered: 5716 llvm_unreachable("Unexpected ordering."); 5717 } 5718 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 5719 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5720 } 5721 5722 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, 5723 llvm::AtomicOrdering AO, const Expr *X, 5724 const Expr *E, SourceLocation Loc) { 5725 // x = expr; 5726 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 5727 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 5728 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5729 // OpenMP, 2.17.7, atomic Construct 5730 // If the write, update, or capture clause is specified and the release, 5731 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5732 // the atomic operation is also a release flush. 5733 switch (AO) { 5734 case llvm::AtomicOrdering::Release: 5735 case llvm::AtomicOrdering::AcquireRelease: 5736 case llvm::AtomicOrdering::SequentiallyConsistent: 5737 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5738 llvm::AtomicOrdering::Release); 5739 break; 5740 case llvm::AtomicOrdering::Acquire: 5741 case llvm::AtomicOrdering::Monotonic: 5742 break; 5743 case llvm::AtomicOrdering::NotAtomic: 5744 case llvm::AtomicOrdering::Unordered: 5745 llvm_unreachable("Unexpected ordering."); 5746 } 5747 } 5748 5749 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 5750 RValue Update, 5751 BinaryOperatorKind BO, 5752 llvm::AtomicOrdering AO, 5753 bool IsXLHSInRHSPart) { 5754 ASTContext &Context = CGF.getContext(); 5755 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 5756 // expression is simple and atomic is allowed for the given type for the 5757 // target platform. 5758 if (BO == BO_Comma || !Update.isScalar() || 5759 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() || 5760 (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 5761 (Update.getScalarVal()->getType() != 5762 X.getAddress(CGF).getElementType())) || 5763 !X.getAddress(CGF).getElementType()->isIntegerTy() || 5764 !Context.getTargetInfo().hasBuiltinAtomic( 5765 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 5766 return std::make_pair(false, RValue::get(nullptr)); 5767 5768 llvm::AtomicRMWInst::BinOp RMWOp; 5769 switch (BO) { 5770 case BO_Add: 5771 RMWOp = llvm::AtomicRMWInst::Add; 5772 break; 5773 case BO_Sub: 5774 if (!IsXLHSInRHSPart) 5775 return std::make_pair(false, RValue::get(nullptr)); 5776 RMWOp = llvm::AtomicRMWInst::Sub; 5777 break; 5778 case BO_And: 5779 RMWOp = llvm::AtomicRMWInst::And; 5780 break; 5781 case BO_Or: 5782 RMWOp = llvm::AtomicRMWInst::Or; 5783 break; 5784 case BO_Xor: 5785 RMWOp = llvm::AtomicRMWInst::Xor; 5786 break; 5787 case BO_LT: 5788 RMWOp = X.getType()->hasSignedIntegerRepresentation() 5789 ? 
(IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 5790 : llvm::AtomicRMWInst::Max) 5791 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 5792 : llvm::AtomicRMWInst::UMax); 5793 break; 5794 case BO_GT: 5795 RMWOp = X.getType()->hasSignedIntegerRepresentation() 5796 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 5797 : llvm::AtomicRMWInst::Min) 5798 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax 5799 : llvm::AtomicRMWInst::UMin); 5800 break; 5801 case BO_Assign: 5802 RMWOp = llvm::AtomicRMWInst::Xchg; 5803 break; 5804 case BO_Mul: 5805 case BO_Div: 5806 case BO_Rem: 5807 case BO_Shl: 5808 case BO_Shr: 5809 case BO_LAnd: 5810 case BO_LOr: 5811 return std::make_pair(false, RValue::get(nullptr)); 5812 case BO_PtrMemD: 5813 case BO_PtrMemI: 5814 case BO_LE: 5815 case BO_GE: 5816 case BO_EQ: 5817 case BO_NE: 5818 case BO_Cmp: 5819 case BO_AddAssign: 5820 case BO_SubAssign: 5821 case BO_AndAssign: 5822 case BO_OrAssign: 5823 case BO_XorAssign: 5824 case BO_MulAssign: 5825 case BO_DivAssign: 5826 case BO_RemAssign: 5827 case BO_ShlAssign: 5828 case BO_ShrAssign: 5829 case BO_Comma: 5830 llvm_unreachable("Unsupported atomic update operation"); 5831 } 5832 llvm::Value *UpdateVal = Update.getScalarVal(); 5833 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 5834 UpdateVal = CGF.Builder.CreateIntCast( 5835 IC, X.getAddress(CGF).getElementType(), 5836 X.getType()->hasSignedIntegerRepresentation()); 5837 } 5838 llvm::Value *Res = 5839 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 5840 return std::make_pair(true, RValue::get(Res)); 5841 } 5842 5843 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 5844 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 5845 llvm::AtomicOrdering AO, SourceLocation Loc, 5846 const llvm::function_ref<RValue(RValue)> CommonGen) { 5847 // Update expressions are allowed to have the following forms: 5848 // x binop= expr; -> xrval binop expr; 5849 // x++, ++x -> xrval + 1; 5850 // x--, --x -> xrval - 1; 5851 // x = x binop expr; -> xrval binop expr 5852 // x = expr binop x; -> expr binop xrval; 5853 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 5854 if (!Res.first) { 5855 if (X.isGlobalReg()) { 5856 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 5857 // 'xrval'. 5858 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 5859 } else { 5860 // Perform the compare-and-swap procedure.
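// Conceptually, a sketch only (the exact IR is produced by EmitAtomicUpdate):
//   expected = atomic load x
//   do {
//     desired = CommonGen(expected)  // re-evaluate update with the old value
//   } while (!compare_exchange(x, expected, desired))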
5861 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 5862 } 5863 } 5864 return Res; 5865 } 5866 5867 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, 5868 llvm::AtomicOrdering AO, const Expr *X, 5869 const Expr *E, const Expr *UE, 5870 bool IsXLHSInRHSPart, SourceLocation Loc) { 5871 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5872 "Update expr in 'atomic update' must be a binary operator."); 5873 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5874 // Update expressions are allowed to have the following forms: 5875 // x binop= expr; -> xrval binop expr; 5876 // x++, ++x -> xrval + 1; 5877 // x--, --x -> xrval - 1; 5878 // x = x binop expr; -> xrval binop expr 5879 // x = expr binop x; -> expr binop xrval; 5880 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 5881 LValue XLValue = CGF.EmitLValue(X); 5882 RValue ExprRValue = CGF.EmitAnyExpr(E); 5883 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5884 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5885 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5886 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5887 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 5888 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5889 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5890 return CGF.EmitAnyExpr(UE); 5891 }; 5892 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 5893 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5894 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5895 // OpenMP, 2.17.7, atomic Construct 5896 // If the write, update, or capture clause is specified and the release, 5897 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5898 // the atomic operation is also a release flush.
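// For example (hypothetical user code), '#pragma omp atomic update release'
// applied to 'x += v;' takes the Release case below and emits a release
// flush.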
5899 switch (AO) { 5900 case llvm::AtomicOrdering::Release: 5901 case llvm::AtomicOrdering::AcquireRelease: 5902 case llvm::AtomicOrdering::SequentiallyConsistent: 5903 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5904 llvm::AtomicOrdering::Release); 5905 break; 5906 case llvm::AtomicOrdering::Acquire: 5907 case llvm::AtomicOrdering::Monotonic: 5908 break; 5909 case llvm::AtomicOrdering::NotAtomic: 5910 case llvm::AtomicOrdering::Unordered: 5911 llvm_unreachable("Unexpected ordering."); 5912 } 5913 } 5914 5915 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 5916 QualType SourceType, QualType ResType, 5917 SourceLocation Loc) { 5918 switch (CGF.getEvaluationKind(ResType)) { 5919 case TEK_Scalar: 5920 return RValue::get( 5921 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 5922 case TEK_Complex: { 5923 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 5924 return RValue::getComplex(Res.first, Res.second); 5925 } 5926 case TEK_Aggregate: 5927 break; 5928 } 5929 llvm_unreachable("Must be a scalar or complex."); 5930 } 5931 5932 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, 5933 llvm::AtomicOrdering AO, 5934 bool IsPostfixUpdate, const Expr *V, 5935 const Expr *X, const Expr *E, 5936 const Expr *UE, bool IsXLHSInRHSPart, 5937 SourceLocation Loc) { 5938 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 5939 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 5940 RValue NewVVal; 5941 LValue VLValue = CGF.EmitLValue(V); 5942 LValue XLValue = CGF.EmitLValue(X); 5943 RValue ExprRValue = CGF.EmitAnyExpr(E); 5944 QualType NewVValType; 5945 if (UE) { 5946 // 'x' is updated with some additional value. 5947 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5948 "Update expr in 'atomic capture' must be a binary operator."); 5949 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5950 // Update expressions are allowed to have the following forms: 5951 // x binop= expr; -> xrval binop expr; 5952 // x++, ++x -> xrval + 1; 5953 // x--, --x -> xrval - 1; 5954 // x = x binop expr; -> xrval binop expr 5955 // x = expr binop x; -> expr binop xrval; 5956 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5957 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5958 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5959 NewVValType = XRValExpr->getType(); 5960 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5961 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 5962 IsPostfixUpdate](RValue XRValue) { 5963 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5964 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5965 RValue Res = CGF.EmitAnyExpr(UE); 5966 NewVVal = IsPostfixUpdate ? XRValue : Res; 5967 return Res; 5968 }; 5969 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5970 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5971 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5972 if (Res.first) { 5973 // 'atomicrmw' instruction was generated. 5974 if (IsPostfixUpdate) { 5975 // Use old value from 'atomicrmw'. 5976 NewVVal = Res.second; 5977 } else { 5978 // 'atomicrmw' does not provide the new value, so evaluate it using the old 5979 // value of 'x'.
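// E.g. (illustrative): for 'v = x++;' the old value returned by 'atomicrmw'
// is stored to 'v'; for 'v = ++x;' the new value is recomputed below by
// re-evaluating the update expression over the old value of 'x'.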
5980 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5981 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5982 NewVVal = CGF.EmitAnyExpr(UE); 5983 } 5984 } 5985 } else { 5986 // 'x' is simply rewritten with some 'expr'. 5987 NewVValType = X->getType().getNonReferenceType(); 5988 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5989 X->getType().getNonReferenceType(), Loc); 5990 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5991 NewVVal = XRValue; 5992 return ExprRValue; 5993 }; 5994 // Try to perform atomicrmw xchg, otherwise simple exchange. 5995 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5996 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5997 Loc, Gen); 5998 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5999 if (Res.first) { 6000 // 'atomicrmw' instruction was generated. 6001 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 6002 } 6003 } 6004 // Emit post-update store to 'v' of old/new 'x' value. 6005 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 6006 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 6007 // OpenMP 5.1 removes the required flush for capture clause. 6008 if (CGF.CGM.getLangOpts().OpenMP < 51) { 6009 // OpenMP, 2.17.7, atomic Construct 6010 // If the write, update, or capture clause is specified and the release, 6011 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 6012 // the atomic operation is also a release flush. 6013 // If the read or capture clause is specified and the acquire, acq_rel, or 6014 // seq_cst clause is specified then the strong flush on exit from the atomic 6015 // operation is also an acquire flush. 6016 switch (AO) { 6017 case llvm::AtomicOrdering::Release: 6018 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 6019 llvm::AtomicOrdering::Release); 6020 break; 6021 case llvm::AtomicOrdering::Acquire: 6022 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 6023 llvm::AtomicOrdering::Acquire); 6024 break; 6025 case llvm::AtomicOrdering::AcquireRelease: 6026 case llvm::AtomicOrdering::SequentiallyConsistent: 6027 CGF.CGM.getOpenMPRuntime().emitFlush( 6028 CGF, llvm::None, Loc, llvm::AtomicOrdering::AcquireRelease); 6029 break; 6030 case llvm::AtomicOrdering::Monotonic: 6031 break; 6032 case llvm::AtomicOrdering::NotAtomic: 6033 case llvm::AtomicOrdering::Unordered: 6034 llvm_unreachable("Unexpected ordering."); 6035 } 6036 } 6037 } 6038 6039 static void emitOMPAtomicCompareExpr(CodeGenFunction &CGF, 6040 llvm::AtomicOrdering AO, const Expr *X, 6041 const Expr *E, const Expr *D, 6042 const Expr *CE, bool IsXBinopExpr, 6043 SourceLocation Loc) { 6044 llvm::OpenMPIRBuilder &OMPBuilder = 6045 CGF.CGM.getOpenMPRuntime().getOMPBuilder(); 6046 6047 OMPAtomicCompareOp Op; 6048 assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator"); 6049 switch (cast<BinaryOperator>(CE)->getOpcode()) { 6050 case BO_EQ: 6051 Op = OMPAtomicCompareOp::EQ; 6052 break; 6053 case BO_LT: 6054 Op = OMPAtomicCompareOp::MIN; 6055 break; 6056 case BO_GT: 6057 Op = OMPAtomicCompareOp::MAX; 6058 break; 6059 default: 6060 llvm_unreachable("unsupported atomic compare binary operator"); 6061 } 6062 6063 LValue XLVal = CGF.EmitLValue(X); 6064 Address XAddr = XLVal.getAddress(CGF); 6065 llvm::Value *EVal = CGF.EmitScalarExpr(E); 6066 llvm::Value *DVal = D ? 
CGF.EmitScalarExpr(D) : nullptr; 6067 6068 llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{ 6069 XAddr.getPointer(), XAddr.getElementType(), 6070 X->getType().isVolatileQualified(), 6071 X->getType()->hasSignedIntegerRepresentation()}; 6072 6073 CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare( 6074 CGF.Builder, XOpVal, EVal, DVal, AO, Op, IsXBinopExpr)); 6075 } 6076 6077 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 6078 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 6079 const Expr *X, const Expr *V, const Expr *E, 6080 const Expr *UE, const Expr *D, const Expr *CE, 6081 bool IsXLHSInRHSPart, bool IsCompareCapture, 6082 SourceLocation Loc) { 6083 switch (Kind) { 6084 case OMPC_read: 6085 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 6086 break; 6087 case OMPC_write: 6088 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 6089 break; 6090 case OMPC_unknown: 6091 case OMPC_update: 6092 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 6093 break; 6094 case OMPC_capture: 6095 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 6096 IsXLHSInRHSPart, Loc); 6097 break; 6098 case OMPC_compare: { 6099 if (IsCompareCapture) { 6100 // Emit an error here. 6101 unsigned DiagID = CGF.CGM.getDiags().getCustomDiagID( 6102 DiagnosticsEngine::Error, 6103 "'atomic compare capture' is not supported for now"); 6104 CGF.CGM.getDiags().Report(DiagID); 6105 } else { 6106 emitOMPAtomicCompareExpr(CGF, AO, X, E, D, CE, IsXLHSInRHSPart, Loc); 6107 } 6108 break; 6109 } 6110 case OMPC_if: 6111 case OMPC_final: 6112 case OMPC_num_threads: 6113 case OMPC_private: 6114 case OMPC_firstprivate: 6115 case OMPC_lastprivate: 6116 case OMPC_reduction: 6117 case OMPC_task_reduction: 6118 case OMPC_in_reduction: 6119 case OMPC_safelen: 6120 case OMPC_simdlen: 6121 case OMPC_sizes: 6122 case OMPC_full: 6123 case OMPC_partial: 6124 case OMPC_allocator: 6125 case OMPC_allocate: 6126 case OMPC_collapse: 6127 case OMPC_default: 6128 case OMPC_seq_cst: 6129 case OMPC_acq_rel: 6130 case OMPC_acquire: 6131 case OMPC_release: 6132 case OMPC_relaxed: 6133 case OMPC_shared: 6134 case OMPC_linear: 6135 case OMPC_aligned: 6136 case OMPC_copyin: 6137 case OMPC_copyprivate: 6138 case OMPC_flush: 6139 case OMPC_depobj: 6140 case OMPC_proc_bind: 6141 case OMPC_schedule: 6142 case OMPC_ordered: 6143 case OMPC_nowait: 6144 case OMPC_untied: 6145 case OMPC_threadprivate: 6146 case OMPC_depend: 6147 case OMPC_mergeable: 6148 case OMPC_device: 6149 case OMPC_threads: 6150 case OMPC_simd: 6151 case OMPC_map: 6152 case OMPC_num_teams: 6153 case OMPC_thread_limit: 6154 case OMPC_priority: 6155 case OMPC_grainsize: 6156 case OMPC_nogroup: 6157 case OMPC_num_tasks: 6158 case OMPC_hint: 6159 case OMPC_dist_schedule: 6160 case OMPC_defaultmap: 6161 case OMPC_uniform: 6162 case OMPC_to: 6163 case OMPC_from: 6164 case OMPC_use_device_ptr: 6165 case OMPC_use_device_addr: 6166 case OMPC_is_device_ptr: 6167 case OMPC_unified_address: 6168 case OMPC_unified_shared_memory: 6169 case OMPC_reverse_offload: 6170 case OMPC_dynamic_allocators: 6171 case OMPC_atomic_default_mem_order: 6172 case OMPC_device_type: 6173 case OMPC_match: 6174 case OMPC_nontemporal: 6175 case OMPC_order: 6176 case OMPC_destroy: 6177 case OMPC_detach: 6178 case OMPC_inclusive: 6179 case OMPC_exclusive: 6180 case OMPC_uses_allocators: 6181 case OMPC_affinity: 6182 case OMPC_init: 6183 case OMPC_inbranch: 6184 case OMPC_notinbranch: 6185 case OMPC_link: 6186 case OMPC_indirect: 6187 case OMPC_use: 6188 case OMPC_novariants: 6189 case 
OMPC_nocontext: 6190 case OMPC_filter: 6191 case OMPC_when: 6192 case OMPC_adjust_args: 6193 case OMPC_append_args: 6194 case OMPC_memory_order: 6195 case OMPC_bind: 6196 case OMPC_align: 6197 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 6198 } 6199 } 6200 6201 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 6202 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic; 6203 bool MemOrderingSpecified = false; 6204 if (S.getSingleClause<OMPSeqCstClause>()) { 6205 AO = llvm::AtomicOrdering::SequentiallyConsistent; 6206 MemOrderingSpecified = true; 6207 } else if (S.getSingleClause<OMPAcqRelClause>()) { 6208 AO = llvm::AtomicOrdering::AcquireRelease; 6209 MemOrderingSpecified = true; 6210 } else if (S.getSingleClause<OMPAcquireClause>()) { 6211 AO = llvm::AtomicOrdering::Acquire; 6212 MemOrderingSpecified = true; 6213 } else if (S.getSingleClause<OMPReleaseClause>()) { 6214 AO = llvm::AtomicOrdering::Release; 6215 MemOrderingSpecified = true; 6216 } else if (S.getSingleClause<OMPRelaxedClause>()) { 6217 AO = llvm::AtomicOrdering::Monotonic; 6218 MemOrderingSpecified = true; 6219 } 6220 llvm::SmallSet<OpenMPClauseKind, 2> KindsEncountered; 6221 OpenMPClauseKind Kind = OMPC_unknown; 6222 for (const OMPClause *C : S.clauses()) { 6223 // Find the first clause (skip the seq_cst|acq_rel|acquire|release|relaxed 6224 // clause, if it is first). 6225 OpenMPClauseKind K = C->getClauseKind(); 6226 if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire || 6227 K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint) 6228 continue; 6229 Kind = K; 6230 KindsEncountered.insert(K); 6231 } 6232 bool IsCompareCapture = false; 6233 if (KindsEncountered.contains(OMPC_compare) && 6234 KindsEncountered.contains(OMPC_capture)) { 6235 IsCompareCapture = true; 6236 Kind = OMPC_compare; 6237 } 6238 if (!MemOrderingSpecified) { 6239 llvm::AtomicOrdering DefaultOrder = 6240 CGM.getOpenMPRuntime().getDefaultMemoryOrdering(); 6241 if (DefaultOrder == llvm::AtomicOrdering::Monotonic || 6242 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent || 6243 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease && 6244 Kind == OMPC_capture)) { 6245 AO = DefaultOrder; 6246 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) { 6247 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) { 6248 AO = llvm::AtomicOrdering::Release; 6249 } else if (Kind == OMPC_read) { 6250 assert(Kind == OMPC_read && "Unexpected atomic kind."); 6251 AO = llvm::AtomicOrdering::Acquire; 6252 } 6253 } 6254 } 6255 6256 LexicalScope Scope(*this, S.getSourceRange()); 6257 EmitStopPoint(S.getAssociatedStmt()); 6258 emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(), 6259 S.getExpr(), S.getUpdateExpr(), S.getD(), S.getCondExpr(), 6260 S.isXLHSInRHSPart(), IsCompareCapture, S.getBeginLoc()); 6261 } 6262 6263 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 6264 const OMPExecutableDirective &S, 6265 const RegionCodeGenTy &CodeGen) { 6266 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 6267 CodeGenModule &CGM = CGF.CGM; 6268 6269 // On the device, emit this construct as inlined code.
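// A hypothetical example of user code reaching this lowering:
//   #pragma omp target map(tofrom: a)
//   { a += 1; }
// On the host path below, the region is instead outlined and, when
// offloading is possible, registered as an offload entry before the target
// call is emitted.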
6270 if (CGM.getLangOpts().OpenMPIsDevice) { 6271 OMPLexicalScope Scope(CGF, S, OMPD_target); 6272 CGM.getOpenMPRuntime().emitInlinedDirective( 6273 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6274 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 6275 }); 6276 return; 6277 } 6278 6279 auto LPCRegion = CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S); 6280 llvm::Function *Fn = nullptr; 6281 llvm::Constant *FnID = nullptr; 6282 6283 const Expr *IfCond = nullptr; 6284 // Check for the (at most one) 'if' clause associated with the target region. 6285 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6286 if (C->getNameModifier() == OMPD_unknown || 6287 C->getNameModifier() == OMPD_target) { 6288 IfCond = C->getCondition(); 6289 break; 6290 } 6291 } 6292 6293 // Check if we have any device clause associated with the directive. 6294 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device( 6295 nullptr, OMPC_DEVICE_unknown); 6296 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 6297 Device.setPointerAndInt(C->getDevice(), C->getModifier()); 6298 6299 // Check if we have an if clause whose conditional always evaluates to false 6300 // or if we do not have any targets specified. If so, the target region is not 6301 // an offload entry point. 6302 bool IsOffloadEntry = true; 6303 if (IfCond) { 6304 bool Val; 6305 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 6306 IsOffloadEntry = false; 6307 } 6308 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6309 IsOffloadEntry = false; 6310 6311 if (CGM.getLangOpts().OpenMPOffloadMandatory && !IsOffloadEntry) { 6312 unsigned DiagID = CGM.getDiags().getCustomDiagID( 6313 DiagnosticsEngine::Error, 6314 "No offloading entry generated while offloading is mandatory."); 6315 CGM.getDiags().Report(DiagID); 6316 } 6317 6318 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 6319 StringRef ParentName; 6320 // In case we have Ctors/Dtors we use the complete type variant to produce 6321 // the mangling of the device outlined kernel. 6322 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 6323 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 6324 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 6325 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 6326 else 6327 ParentName = 6328 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 6329 6330 // Emit target region as a standalone region. 6331 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 6332 IsOffloadEntry, CodeGen); 6333 OMPLexicalScope Scope(CGF, S, OMPD_task); 6334 auto &&SizeEmitter = 6335 [IsOffloadEntry](CodeGenFunction &CGF, 6336 const OMPLoopDirective &D) -> llvm::Value * { 6337 if (IsOffloadEntry) { 6338 OMPLoopScope(CGF, D); 6339 // Emit calculation of the iterations count.
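// (The count comes from the directive's precomputed NumIterations
// expression; it is widened to i64 below for the runtime interface.)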
6340 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 6341 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 6342 /*isSigned=*/false); 6343 return NumIterations; 6344 } 6345 return nullptr; 6346 }; 6347 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 6348 SizeEmitter); 6349 } 6350 6351 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 6352 PrePostActionTy &Action) { 6353 Action.Enter(CGF); 6354 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6355 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6356 CGF.EmitOMPPrivateClause(S, PrivateScope); 6357 (void)PrivateScope.Privatize(); 6358 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6359 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6360 6361 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 6362 CGF.EnsureInsertPoint(); 6363 } 6364 6365 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 6366 StringRef ParentName, 6367 const OMPTargetDirective &S) { 6368 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6369 emitTargetRegion(CGF, S, Action); 6370 }; 6371 llvm::Function *Fn; 6372 llvm::Constant *Addr; 6373 // Emit target region as a standalone region. 6374 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6375 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6376 assert(Fn && Addr && "Target device function emission failed."); 6377 } 6378 6379 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 6380 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6381 emitTargetRegion(CGF, S, Action); 6382 }; 6383 emitCommonOMPTargetDirective(*this, S, CodeGen); 6384 } 6385 6386 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 6387 const OMPExecutableDirective &S, 6388 OpenMPDirectiveKind InnermostKind, 6389 const RegionCodeGenTy &CodeGen) { 6390 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 6391 llvm::Function *OutlinedFn = 6392 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 6393 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 6394 6395 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 6396 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 6397 if (NT || TL) { 6398 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 6399 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 6400 6401 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 6402 S.getBeginLoc()); 6403 } 6404 6405 OMPTeamsScope Scope(CGF, S); 6406 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 6407 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 6408 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 6409 CapturedVars); 6410 } 6411 6412 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 6413 // Emit teams region as a standalone region. 
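// An illustrative example (not from this file) of code handled here:
//   #pragma omp teams num_teams(4) reduction(+: sum)
//   { sum += work(); }
// The firstprivate/private/reduction clauses are privatized below before the
// captured statement is emitted.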
6414 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6415 Action.Enter(CGF); 6416 OMPPrivateScope PrivateScope(CGF); 6417 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6418 CGF.EmitOMPPrivateClause(S, PrivateScope); 6419 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6420 (void)PrivateScope.Privatize(); 6421 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 6422 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6423 }; 6424 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6425 emitPostUpdateForReductionClause(*this, S, 6426 [](CodeGenFunction &) { return nullptr; }); 6427 } 6428 6429 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 6430 const OMPTargetTeamsDirective &S) { 6431 auto *CS = S.getCapturedStmt(OMPD_teams); 6432 Action.Enter(CGF); 6433 // Emit teams region as a standalone region. 6434 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6435 Action.Enter(CGF); 6436 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6437 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6438 CGF.EmitOMPPrivateClause(S, PrivateScope); 6439 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6440 (void)PrivateScope.Privatize(); 6441 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6442 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6443 CGF.EmitStmt(CS->getCapturedStmt()); 6444 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6445 }; 6446 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 6447 emitPostUpdateForReductionClause(CGF, S, 6448 [](CodeGenFunction &) { return nullptr; }); 6449 } 6450 6451 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 6452 CodeGenModule &CGM, StringRef ParentName, 6453 const OMPTargetTeamsDirective &S) { 6454 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6455 emitTargetTeamsRegion(CGF, Action, S); 6456 }; 6457 llvm::Function *Fn; 6458 llvm::Constant *Addr; 6459 // Emit target region as a standalone region. 6460 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6461 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6462 assert(Fn && Addr && "Target device function emission failed."); 6463 } 6464 6465 void CodeGenFunction::EmitOMPTargetTeamsDirective( 6466 const OMPTargetTeamsDirective &S) { 6467 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6468 emitTargetTeamsRegion(CGF, Action, S); 6469 }; 6470 emitCommonOMPTargetDirective(*this, S, CodeGen); 6471 } 6472 6473 static void 6474 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 6475 const OMPTargetTeamsDistributeDirective &S) { 6476 Action.Enter(CGF); 6477 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6478 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6479 }; 6480 6481 // Emit teams region as a standalone region. 
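// E.g. (hypothetical): '#pragma omp target teams distribute' over a loop;
// the 'distribute' part is emitted through CodeGenDistribute above.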
6482 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6483 PrePostActionTy &Action) { 6484 Action.Enter(CGF); 6485 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6486 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6487 (void)PrivateScope.Privatize(); 6488 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6489 CodeGenDistribute); 6490 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6491 }; 6492 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 6493 emitPostUpdateForReductionClause(CGF, S, 6494 [](CodeGenFunction &) { return nullptr; }); 6495 } 6496 6497 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 6498 CodeGenModule &CGM, StringRef ParentName, 6499 const OMPTargetTeamsDistributeDirective &S) { 6500 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6501 emitTargetTeamsDistributeRegion(CGF, Action, S); 6502 }; 6503 llvm::Function *Fn; 6504 llvm::Constant *Addr; 6505 // Emit target region as a standalone region. 6506 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6507 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6508 assert(Fn && Addr && "Target device function emission failed."); 6509 } 6510 6511 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 6512 const OMPTargetTeamsDistributeDirective &S) { 6513 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6514 emitTargetTeamsDistributeRegion(CGF, Action, S); 6515 }; 6516 emitCommonOMPTargetDirective(*this, S, CodeGen); 6517 } 6518 6519 static void emitTargetTeamsDistributeSimdRegion( 6520 CodeGenFunction &CGF, PrePostActionTy &Action, 6521 const OMPTargetTeamsDistributeSimdDirective &S) { 6522 Action.Enter(CGF); 6523 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6524 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6525 }; 6526 6527 // Emit teams region as a standalone region. 6528 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6529 PrePostActionTy &Action) { 6530 Action.Enter(CGF); 6531 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6532 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6533 (void)PrivateScope.Privatize(); 6534 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6535 CodeGenDistribute); 6536 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6537 }; 6538 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 6539 emitPostUpdateForReductionClause(CGF, S, 6540 [](CodeGenFunction &) { return nullptr; }); 6541 } 6542 6543 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 6544 CodeGenModule &CGM, StringRef ParentName, 6545 const OMPTargetTeamsDistributeSimdDirective &S) { 6546 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6547 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6548 }; 6549 llvm::Function *Fn; 6550 llvm::Constant *Addr; 6551 // Emit target region as a standalone region. 
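// (This device-function hook runs while compiling for an offload device;
// Fn and Addr receive the outlined kernel and its offload entry address.)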
6552 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6553 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6554 assert(Fn && Addr && "Target device function emission failed."); 6555 } 6556 6557 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 6558 const OMPTargetTeamsDistributeSimdDirective &S) { 6559 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6560 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6561 }; 6562 emitCommonOMPTargetDirective(*this, S, CodeGen); 6563 } 6564 6565 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 6566 const OMPTeamsDistributeDirective &S) { 6567 6568 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6569 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6570 }; 6571 6572 // Emit teams region as a standalone region. 6573 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6574 PrePostActionTy &Action) { 6575 Action.Enter(CGF); 6576 OMPPrivateScope PrivateScope(CGF); 6577 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6578 (void)PrivateScope.Privatize(); 6579 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6580 CodeGenDistribute); 6581 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6582 }; 6583 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6584 emitPostUpdateForReductionClause(*this, S, 6585 [](CodeGenFunction &) { return nullptr; }); 6586 } 6587 6588 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 6589 const OMPTeamsDistributeSimdDirective &S) { 6590 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6591 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6592 }; 6593 6594 // Emit teams region as a standalone region. 6595 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6596 PrePostActionTy &Action) { 6597 Action.Enter(CGF); 6598 OMPPrivateScope PrivateScope(CGF); 6599 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6600 (void)PrivateScope.Privatize(); 6601 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 6602 CodeGenDistribute); 6603 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6604 }; 6605 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 6606 emitPostUpdateForReductionClause(*this, S, 6607 [](CodeGenFunction &) { return nullptr; }); 6608 } 6609 6610 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 6611 const OMPTeamsDistributeParallelForDirective &S) { 6612 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6613 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6614 S.getDistInc()); 6615 }; 6616 6617 // Emit teams region as a standalone region. 
6618 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6619 PrePostActionTy &Action) { 6620 Action.Enter(CGF); 6621 OMPPrivateScope PrivateScope(CGF); 6622 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6623 (void)PrivateScope.Privatize(); 6624 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6625 CodeGenDistribute); 6626 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6627 }; 6628 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 6629 emitPostUpdateForReductionClause(*this, S, 6630 [](CodeGenFunction &) { return nullptr; }); 6631 } 6632 6633 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 6634 const OMPTeamsDistributeParallelForSimdDirective &S) { 6635 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6636 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6637 S.getDistInc()); 6638 }; 6639 6640 // Emit teams region as a standalone region. 6641 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6642 PrePostActionTy &Action) { 6643 Action.Enter(CGF); 6644 OMPPrivateScope PrivateScope(CGF); 6645 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6646 (void)PrivateScope.Privatize(); 6647 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6648 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6649 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6650 }; 6651 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 6652 CodeGen); 6653 emitPostUpdateForReductionClause(*this, S, 6654 [](CodeGenFunction &) { return nullptr; }); 6655 } 6656 6657 void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) { 6658 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 6659 llvm::Value *Device = nullptr; 6660 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6661 Device = EmitScalarExpr(C->getDevice()); 6662 6663 llvm::Value *NumDependences = nullptr; 6664 llvm::Value *DependenceAddress = nullptr; 6665 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 6666 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 6667 DC->getModifier()); 6668 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 6669 std::pair<llvm::Value *, Address> DependencePair = 6670 CGM.getOpenMPRuntime().emitDependClause(*this, Dependencies, 6671 DC->getBeginLoc()); 6672 NumDependences = DependencePair.first; 6673 DependenceAddress = Builder.CreatePointerCast( 6674 DependencePair.second.getPointer(), CGM.Int8PtrTy); 6675 } 6676 6677 assert(!(S.hasClausesOfKind<OMPNowaitClause>() && 6678 !(S.getSingleClause<OMPInitClause>() || 6679 S.getSingleClause<OMPDestroyClause>() || 6680 S.getSingleClause<OMPUseClause>())) && 6681 "OMPNowaitClause clause is used separately in OMPInteropDirective."); 6682 6683 if (const auto *C = S.getSingleClause<OMPInitClause>()) { 6684 llvm::Value *InteropvarPtr = 6685 EmitLValue(C->getInteropVar()).getPointer(*this); 6686 llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown; 6687 if (C->getIsTarget()) { 6688 InteropType = llvm::omp::OMPInteropType::Target; 6689 } else { 6690 assert(C->getIsTargetSync() && "Expected interop-type target/targetsync"); 6691 InteropType = llvm::omp::OMPInteropType::TargetSync; 6692 } 6693 OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device, 6694 NumDependences, DependenceAddress, 6695 S.hasClausesOfKind<OMPNowaitClause>()); 6696 } else if (const auto *C = 
S.getSingleClause<OMPDestroyClause>()) { 6697 llvm::Value *InteropvarPtr = 6698 EmitLValue(C->getInteropVar()).getPointer(*this); 6699 OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device, 6700 NumDependences, DependenceAddress, 6701 S.hasClausesOfKind<OMPNowaitClause>()); 6702 } else if (const auto *C = S.getSingleClause<OMPUseClause>()) { 6703 llvm::Value *InteropvarPtr = 6704 EmitLValue(C->getInteropVar()).getPointer(*this); 6705 OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device, 6706 NumDependences, DependenceAddress, 6707 S.hasClausesOfKind<OMPNowaitClause>()); 6708 } 6709 } 6710 6711 static void emitTargetTeamsDistributeParallelForRegion( 6712 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 6713 PrePostActionTy &Action) { 6714 Action.Enter(CGF); 6715 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6716 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6717 S.getDistInc()); 6718 }; 6719 6720 // Emit teams region as a standalone region. 6721 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6722 PrePostActionTy &Action) { 6723 Action.Enter(CGF); 6724 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6725 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6726 (void)PrivateScope.Privatize(); 6727 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6728 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6729 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6730 }; 6731 6732 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 6733 CodeGenTeams); 6734 emitPostUpdateForReductionClause(CGF, S, 6735 [](CodeGenFunction &) { return nullptr; }); 6736 } 6737 6738 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 6739 CodeGenModule &CGM, StringRef ParentName, 6740 const OMPTargetTeamsDistributeParallelForDirective &S) { 6741 // Emit SPMD target teams distribute parallel for region as a standalone 6742 // region. 6743 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6744 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6745 }; 6746 llvm::Function *Fn; 6747 llvm::Constant *Addr; 6748 // Emit target region as a standalone region. 6749 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6750 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6751 assert(Fn && Addr && "Target device function emission failed."); 6752 } 6753 6754 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 6755 const OMPTargetTeamsDistributeParallelForDirective &S) { 6756 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6757 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6758 }; 6759 emitCommonOMPTargetDirective(*this, S, CodeGen); 6760 } 6761 6762 static void emitTargetTeamsDistributeParallelForSimdRegion( 6763 CodeGenFunction &CGF, 6764 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 6765 PrePostActionTy &Action) { 6766 Action.Enter(CGF); 6767 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6768 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6769 S.getDistInc()); 6770 }; 6771 6772 // Emit teams region as a standalone region. 
6773 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6774 PrePostActionTy &Action) { 6775 Action.Enter(CGF); 6776 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6777 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6778 (void)PrivateScope.Privatize(); 6779 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6780 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6781 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6782 }; 6783 6784 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 6785 CodeGenTeams); 6786 emitPostUpdateForReductionClause(CGF, S, 6787 [](CodeGenFunction &) { return nullptr; }); 6788 } 6789 6790 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 6791 CodeGenModule &CGM, StringRef ParentName, 6792 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6793 // Emit SPMD target teams distribute parallel for simd region as a standalone 6794 // region. 6795 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6796 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6797 }; 6798 llvm::Function *Fn; 6799 llvm::Constant *Addr; 6800 // Emit target region as a standalone region. 6801 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6802 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6803 assert(Fn && Addr && "Target device function emission failed."); 6804 } 6805 6806 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 6807 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6808 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6809 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6810 }; 6811 emitCommonOMPTargetDirective(*this, S, CodeGen); 6812 } 6813 6814 void CodeGenFunction::EmitOMPCancellationPointDirective( 6815 const OMPCancellationPointDirective &S) { 6816 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 6817 S.getCancelRegion()); 6818 } 6819 6820 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 6821 const Expr *IfCond = nullptr; 6822 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6823 if (C->getNameModifier() == OMPD_unknown || 6824 C->getNameModifier() == OMPD_cancel) { 6825 IfCond = C->getCondition(); 6826 break; 6827 } 6828 } 6829 if (CGM.getLangOpts().OpenMPIRBuilder) { 6830 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 6831 // TODO: This check is necessary as we only generate `omp parallel` through 6832 // the OpenMPIRBuilder for now. 
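// An illustrative cancellation example (hypothetical user code):
//   #pragma omp parallel
//   {
//     #pragma omp cancel parallel if(err != 0)
//   }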
6833 if (S.getCancelRegion() == OMPD_parallel || 6834 S.getCancelRegion() == OMPD_sections || 6835 S.getCancelRegion() == OMPD_section) { 6836 llvm::Value *IfCondition = nullptr; 6837 if (IfCond) 6838 IfCondition = EmitScalarExpr(IfCond, 6839 /*IgnoreResultAssign=*/true); 6840 return Builder.restoreIP( 6841 OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion())); 6842 } 6843 } 6844 6845 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond, 6846 S.getCancelRegion()); 6847 } 6848 6849 CodeGenFunction::JumpDest 6850 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 6851 if (Kind == OMPD_parallel || Kind == OMPD_task || 6852 Kind == OMPD_target_parallel || Kind == OMPD_taskloop || 6853 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop) 6854 return ReturnBlock; 6855 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections || 6856 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for || 6857 Kind == OMPD_distribute_parallel_for || 6858 Kind == OMPD_target_parallel_for || 6859 Kind == OMPD_teams_distribute_parallel_for || 6860 Kind == OMPD_target_teams_distribute_parallel_for); 6861 return OMPCancelStack.getExitBlock(); 6862 } 6863 6864 void CodeGenFunction::EmitOMPUseDevicePtrClause( 6865 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, 6866 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 6867 auto OrigVarIt = C.varlist_begin(); 6868 auto InitIt = C.inits().begin(); 6869 for (const Expr *PvtVarIt : C.private_copies()) { 6870 const auto *OrigVD = 6871 cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl()); 6872 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl()); 6873 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl()); 6874 6875 // In order to identify the right initializer we need to match the 6876 // declaration used by the mapping logic. In some cases we may get an 6877 // OMPCapturedExprDecl that refers to the original declaration. 6878 const ValueDecl *MatchingVD = OrigVD; 6879 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 6880 // OMPCapturedExprDecls are used to privatize fields of the current 6881 // structure. 6882 const auto *ME = cast<MemberExpr>(OED->getInit()); 6883 assert(isa<CXXThisExpr>(ME->getBase()) && 6884 "Base should be the current struct!"); 6885 MatchingVD = ME->getMemberDecl(); 6886 } 6887 6888 // If we don't have information about the current list item, move on to 6889 // the next one. 6890 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 6891 if (InitAddrIt == CaptureDeviceAddrMap.end()) 6892 continue; 6893 6894 // Initialize the temporary initialization variable with the address 6895 // we get from the runtime library. We have to cast the source address 6896 // because it is always a void *. References are materialized in the 6897 // privatization scope, so the initialization here disregards the fact that 6898 // the original variable is a reference. 6899 llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType()); 6900 Address InitAddr = Builder.CreateElementBitCast(InitAddrIt->second, Ty); 6901 setAddrOfLocalVar(InitVD, InitAddr); 6902 6903 // Emit the private declaration; it will be initialized by the 6904 // declaration we just added to the local declarations map. 6905 EmitDecl(*PvtVD); 6906 6907 // The initialization variable has served its purpose in the emission 6908 // of the previous declaration, so we don't need it anymore.
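// A usage sketch (hypothetical) of what this privatization enables:
//   #pragma omp target data map(to: p[0:n]) use_device_ptr(p)
//   { launch(p); }  // inside the region, 'p' yields the device address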
6909 LocalDeclMap.erase(InitVD); 6910 6911 // Return the address of the private variable. 6912 bool IsRegistered = 6913 PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(PvtVD)); 6914 assert(IsRegistered && "firstprivate var already registered as private"); 6915 // Silence the warning about unused variable. 6916 (void)IsRegistered; 6917 6918 ++OrigVarIt; 6919 ++InitIt; 6920 } 6921 } 6922 6923 static const VarDecl *getBaseDecl(const Expr *Ref) { 6924 const Expr *Base = Ref->IgnoreParenImpCasts(); 6925 while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base)) 6926 Base = OASE->getBase()->IgnoreParenImpCasts(); 6927 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base)) 6928 Base = ASE->getBase()->IgnoreParenImpCasts(); 6929 return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl()); 6930 } 6931 6932 void CodeGenFunction::EmitOMPUseDeviceAddrClause( 6933 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, 6934 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 6935 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed; 6936 for (const Expr *Ref : C.varlists()) { 6937 const VarDecl *OrigVD = getBaseDecl(Ref); 6938 if (!Processed.insert(OrigVD).second) 6939 continue; 6940 // In order to identify the right initializer we need to match the 6941 // declaration used by the mapping logic. In some cases we may get an 6942 // OMPCapturedExprDecl that refers to the original declaration. 6943 const ValueDecl *MatchingVD = OrigVD; 6944 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 6945 // OMPCapturedExprDecls are used to privatize fields of the current 6946 // structure. 6947 const auto *ME = cast<MemberExpr>(OED->getInit()); 6948 assert(isa<CXXThisExpr>(ME->getBase()) && 6949 "Base should be the current struct!"); 6950 MatchingVD = ME->getMemberDecl(); 6951 } 6952 6953 // If we don't have information about the current list item, move on to 6954 // the next one. 6955 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 6956 if (InitAddrIt == CaptureDeviceAddrMap.end()) 6957 continue; 6958 6959 Address PrivAddr = InitAddrIt->getSecond(); 6960 // For declrefs and variable length arrays, we need to load the pointer for 6961 // correct mapping, since the pointer to the data was passed to the runtime. 6962 if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) || 6963 MatchingVD->getType()->isArrayType()) { 6964 QualType PtrTy = getContext().getPointerType( 6965 OrigVD->getType().getNonReferenceType()); 6966 PrivAddr = EmitLoadOfPointer( 6967 Builder.CreateElementBitCast(PrivAddr, ConvertTypeForMem(PtrTy)), 6968 PtrTy->castAs<PointerType>()); 6969 } 6970 6971 (void)PrivateScope.addPrivate(OrigVD, PrivAddr); 6972 } 6973 } 6974 6975 // Generate the instructions for the '#pragma omp target data' directive. 6976 void CodeGenFunction::EmitOMPTargetDataDirective( 6977 const OMPTargetDataDirective &S) { 6978 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true, 6979 /*SeparateBeginEndCalls=*/true); 6980 6981 // Create a pre/post action to signal the privatization of the device pointer. 6982 // This action can be replaced by the OpenMP runtime code generation to 6983 // deactivate privatization.
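// A hypothetical directive handled by this function:
//   #pragma omp target data map(tofrom: a[0:n]) use_device_ptr(a) if(n > 0)
// The if/device clauses are examined further below; use_device_ptr is what
// triggers the privatization action.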
6984 bool PrivatizeDevicePointers = false; 6985 class DevicePointerPrivActionTy : public PrePostActionTy { 6986 bool &PrivatizeDevicePointers; 6987 6988 public: 6989 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers) 6990 : PrivatizeDevicePointers(PrivatizeDevicePointers) {} 6991 void Enter(CodeGenFunction &CGF) override { 6992 PrivatizeDevicePointers = true; 6993 } 6994 }; 6995 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers); 6996 6997 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers]( 6998 CodeGenFunction &CGF, PrePostActionTy &Action) { 6999 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 7000 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 7001 }; 7002 7003 // Codegen that selects whether to generate the privatization code or not. 7004 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers, 7005 &InnermostCodeGen](CodeGenFunction &CGF, 7006 PrePostActionTy &Action) { 7007 RegionCodeGenTy RCG(InnermostCodeGen); 7008 PrivatizeDevicePointers = false; 7009 7010 // Call the pre-action to change the status of PrivatizeDevicePointers if 7011 // needed. 7012 Action.Enter(CGF); 7013 7014 if (PrivatizeDevicePointers) { 7015 OMPPrivateScope PrivateScope(CGF); 7016 // Emit all instances of the use_device_ptr clause. 7017 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>()) 7018 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope, 7019 Info.CaptureDeviceAddrMap); 7020 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>()) 7021 CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope, 7022 Info.CaptureDeviceAddrMap); 7023 (void)PrivateScope.Privatize(); 7024 RCG(CGF); 7025 } else { 7026 OMPLexicalScope Scope(CGF, S, OMPD_unknown); 7027 RCG(CGF); 7028 } 7029 }; 7030 7031 // Forward the provided action to the privatization codegen. 7032 RegionCodeGenTy PrivRCG(PrivCodeGen); 7033 PrivRCG.setAction(Action); 7034 7035 // Although the body of the region is emitted as an inlined directive, we 7036 // don't use an inlined scope, because changes to the references inside the 7037 // region are expected to be visible outside, so we do not privatize them. 7038 OMPLexicalScope Scope(CGF, S); 7039 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data, 7040 PrivRCG); 7041 }; 7042 7043 RegionCodeGenTy RCG(CodeGen); 7044 7045 // If we don't have target devices, don't bother emitting the data mapping 7046 // code. 7047 if (CGM.getLangOpts().OMPTargetTriples.empty()) { 7048 RCG(*this); 7049 return; 7050 } 7051 7052 // Check if we have any if clause associated with the directive. 7053 const Expr *IfCond = nullptr; 7054 if (const auto *C = S.getSingleClause<OMPIfClause>()) 7055 IfCond = C->getCondition(); 7056 7057 // Check if we have any device clause associated with the directive. 7058 const Expr *Device = nullptr; 7059 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 7060 Device = C->getDevice(); 7061 7062 // Set the action to signal privatization of device pointers. 7063 RCG.setAction(PrivAction); 7064 7065 // Emit region code. 7066 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 7067 Info); 7068 } 7069 7070 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 7071 const OMPTargetEnterDataDirective &S) { 7072 // If we don't have target devices, don't bother emitting the data mapping 7073 // code. 7074 if (CGM.getLangOpts().OMPTargetTriples.empty()) 7075 return; 7076 7077 // Check if we have any if clause associated with the directive.
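// (E.g., for the hypothetical '#pragma omp target enter data
// map(to: v[0:n]) if(cond) device(d)', the two clauses are read here.)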
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond,
                                                      Device);
}

void CodeGenFunction::EmitOMPTargetExitDataDirective(
    const OMPTargetExitDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond,
                                                      Device);
}

static void emitTargetParallelRegion(CodeGenFunction &CGF,
                                     const OMPTargetParallelDirective &S,
                                     PrePostActionTy &Action) {
  // Get the captured statement associated with the 'parallel' region.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  Action.Enter(CGF);
  auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
    // TODO: Add support for clauses.
    CGF.EmitStmt(CS->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
                                 emitEmptyBoundParameters);
  emitPostUpdateForReductionClause(CGF, S,
                                   [](CodeGenFunction &) { return nullptr; });
}

void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
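  // (Illustrative source form lowered through this path:
  //   #pragma omp target parallel
  //   { body(); }
  // where 'body' is a made-up placeholder. The region becomes an offload
  // entry whose unique name is derived from ParentName.)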
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelDirective(
    const OMPTargetParallelDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitTargetParallelForRegion(CodeGenFunction &CGF,
                                        const OMPTargetParallelForDirective &S,
                                        PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit the directive as a combined directive consisting of two implicit
  // directives: 'parallel' with a nested 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CodeGenFunction::OMPCancelStackRAII CancelRegion(
        CGF, OMPD_target_parallel_for, S.hasCancel());
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForDirective &S) {
  // Emit SPMD target parallel for region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForDirective(
    const OMPTargetParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void
emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
                                const OMPTargetParallelForSimdDirective &S,
                                PrePostActionTy &Action) {
  Action.Enter(CGF);
  // Emit the directive as a combined directive consisting of two implicit
  // directives: 'parallel' with a nested 'for simd' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForSimdDirective &S) {
  // Emit SPMD target parallel for simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
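  // (Illustrative source form lowered through this path:
  //   #pragma omp target parallel for simd
  //   for (int i = 0; i < n; ++i) a[i] += b[i];
  // where 'a', 'b', and 'n' are made-up placeholders. The worksharing loop
  // is emitted with simd semantics inside the outlined parallel region.)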
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
    const OMPTargetParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

/// Map the helper variable of a loop directive to the address of the
/// corresponding implicit parameter of the outlined function.
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
  Privates.addPrivate(VDecl, CGF.GetAddrOfLocalVar(PVD));
}

void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
  Address CapturedStruct = Address::invalid();
  {
    OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
    CapturedStruct = GenerateCapturedStmtArgument(*CS);
  }
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if taskloop must be emitted without taskgroup.
  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
  // TODO: Check if we should emit tied or untied task.
  Data.Tied = true;
  // Set scheduling for taskloop.
  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
    // grainsize clause
    Data.Schedule.setInt(/*IntVal=*/false);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
    // num_tasks clause
    Data.Schedule.setInt(/*IntVal=*/true);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
  }

  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    OMPLoopScope PreInitScope(CGF, S);
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
      ContBlock = CGF.createBasicBlock("taskloop.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    (void)CGF.EmitOMPLinearClauseInit(S);

    OMPPrivateScope LoopScope(CGF);
    // Emit helper vars inits.
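    // The outlined taskloop function receives its loop bounds through
    // implicit parameters of the captured declaration; the offsets below
    // index into that parameter list as laid out by Sema (the leading
    // parameters carry the task arguments, so the lower bound starts at
    // index 5).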
    enum { LowerBound = 5, UpperBound, Stride, LastIter };
    auto *I = CS->getCapturedDecl()->param_begin();
    auto *LBP = std::next(I, LowerBound);
    auto *UBP = std::next(I, UpperBound);
    auto *STP = std::next(I, Stride);
    auto *LIP = std::next(I, LastIter);
    mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
             LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
    mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
             LoopScope);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iterations count
    // on each iteration (e.g., it is foldable into a constant).
    if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    {
      OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
      emitCommonSimdLoop(
          CGF, S,
          [&S](CodeGenFunction &CGF, PrePostActionTy &) {
            if (isOpenMPSimdDirective(S.getDirectiveKind()))
              CGF.EmitOMPSimdInit(S);
          },
          [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
            CGF.EmitOMPInnerLoop(
                S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                [&S](CodeGenFunction &CGF) {
                  emitOMPLoopBodyWithStopPoint(CGF, S,
                                               CodeGenFunction::JumpDest());
                },
                [](CodeGenFunction &) {});
          });
    }
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
    // Emit final copy of the lastprivate variables if IsLastIter != 0.
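    // (For example, with '#pragma omp taskloop lastprivate(x)' the task that
    // executes the sequentially last iteration sets the is-last flag, and
    // the block below copies the private 'x' back into the original
    // variable; an illustrative sketch.)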
    if (HasLastprivateClause) {
      CGF.EmitOMPLastprivateClauseFinal(
          S, isOpenMPSimdDirective(S.getDirectiveKind()),
          CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
              CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
              (*LIP)->getType(), S.getBeginLoc())));
    }
    CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
                               (*LIP)->getType(), S.getBeginLoc()));
    });
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
                      &Data](CodeGenFunction &CGF, PrePostActionTy &) {
      OMPLoopScope PreInitScope(CGF, S);
      CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
                                                  OutlinedFn, SharedsTy,
                                                  CapturedStruct, IfCond, Data);
    };
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
                                                    CodeGen);
  };
  if (Data.Nogroup) {
    EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
  } else {
    CGM.getOpenMPRuntime().emitTaskgroupRegion(
        *this,
        [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
                                        PrePostActionTy &Action) {
          Action.Enter(CGF);
          CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
                                        Data);
        },
        S.getBeginLoc());
  }
}

void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
    const OMPTaskLoopSimdDirective &S) {
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  OMPLexicalScope Scope(*this, S);
  EmitOMPTaskLoopBasedDirective(S);
}

void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
    const OMPMasterTaskLoopDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    EmitOMPTaskLoopBasedDirective(S);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
    const OMPMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    EmitOMPTaskLoopBasedDirective(S);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  OMPLexicalScope Scope(*this, S);
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
    const OMPParallelMasterTaskLoopDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
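    // The taskloop is generated from within a master region, which is itself
    // nested in the parallel region emitted below, so only the master thread
    // of the team creates the loop tasks.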
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
    const OMPParallelMasterTaskLoopSimdDirective &S) {
  auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
                                  PrePostActionTy &Action) {
      Action.Enter(CGF);
      CGF.EmitOMPTaskLoopBasedDirective(S);
    };
    OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
    CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
                                            S.getBeginLoc());
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

// Generate the instructions for '#pragma omp target update' directive.
void CodeGenFunction::EmitOMPTargetUpdateDirective(
    const OMPTargetUpdateDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (const auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  OMPLexicalScope Scope(*this, S, OMPD_task);
  CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond,
                                                      Device);
}

void CodeGenFunction::EmitOMPGenericLoopDirective(
    const OMPGenericLoopDirective &S) {
  // Unimplemented; just inline the underlying statement for now.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_loop, CodeGen);
}

void CodeGenFunction::EmitSimpleOMPExecutableDirective(
    const OMPExecutableDirective &D) {
  if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
    EmitOMPScanDirective(*SD);
    return;
  }
  if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
    return;
  auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
    OMPPrivateScope GlobalsScope(CGF);
    if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
      // Capture global firstprivates to avoid crash.
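      // (Globals have no entry in LocalDeclMap, so without registering their
      // addresses here the privatization step for a firstprivate global on a
      // task-generating directive would fail to resolve the variable.)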
      for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
        for (const Expr *Ref : C->varlists()) {
          const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
          if (!DRE)
            continue;
          const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
          if (!VD || VD->hasLocalStorage())
            continue;
          if (!CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(Ref);
            GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
          }
        }
      }
    }
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      (void)GlobalsScope.Privatize();
      ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getLoopsNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in
              // clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      (void)GlobalsScope.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  if (D.getDirectiveKind() == OMPD_atomic ||
      D.getDirectiveKind() == OMPD_critical ||
      D.getDirectiveKind() == OMPD_section ||
      D.getDirectiveKind() == OMPD_master ||
      D.getDirectiveKind() == OMPD_masked) {
    EmitStmt(D.getAssociatedStmt());
  } else {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
    OMPSimdLexicalScope Scope(*this, D);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        *this,
        isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                    : D.getDirectiveKind(),
        CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, D);
}