//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(
                                OrigVD->getType().getNonReferenceType()))),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
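      // For example, given
      //   #pragma omp for
      //   for (auto X : Vec) ...
      // the implicit '__range' and '__end' variables of the desugared
      // range-based loop must be emitted before the loop bounds can be
      // computed.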
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
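  // The resulting argument list is: the captured decl's leading implicit
  // parameters, then one argument per captured field of the record, then the
  // captured decl's trailing parameters. TargetArgs mirrors Args but may use
  // translated (device-specific) parameter types when no uintptr casting is
  // performed.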
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted
    // to uintptr. This is necessary given that the runtime library is only
    // able to deal with pointers. The VLA type sizes are passed to the
    // outlined function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit the function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit the function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
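  // When debug info is required, the body is emitted into a "<name>_debug__"
  // function that keeps the original parameter types, and a thin wrapper with
  // the uintptr-based signature expected by the runtime is emitted under the
  // original helper name to call it.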
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace())));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
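  // The emitted copy loop has the following shape:
  //
  //   if (dest.begin == dest.end) goto done;
  // body:
  //   element = phi [begin, entry], [next, body]
  //   <CopyGen(dest.element, src.element)>
  //   next = element + 1
  //   if (next == dest.end) goto done; else goto body;
  // done: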
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element-by-element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with a single array element, so we have to remap the
            // destination and source variables to the corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function, like omp for, omp simd, omp distribute etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target
      // regions, captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
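          // E.g. 'firstprivate(X)' where X is 'constexpr int X = 4;' folds to
          // the constant itself, so no private copy is required.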
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in the current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for a single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy the data.
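          // On the master thread the threadprivate variable and the master's
          // copy share one address, so an inequality check on the two pointers
          // identifies the non-master threads that must perform the copy.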
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                              OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                      *this, OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit an implicit barrier if at least one lastprivate conditional is
    // found and this is not simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
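      // (LHS/RHS here are the compiler-generated placeholder variables that
      // the reduction combiner expression refers to; privatizing them binds
      // the combiner to the shared and private copies respectively.)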
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray
                   ? Builder.CreateElementBitCast(
                         GetAddrOfLocalVar(PrivateVD),
                         ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                   : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
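    // (Inscan reductions are presumably finalized as part of the scan-based
    // loop emission rather than here.)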
1414 if (C->getModifier() == OMPC_REDUCTION_inscan) 1415 continue; 1416 HasAtLeastOneReduction = true; 1417 Privates.append(C->privates().begin(), C->privates().end()); 1418 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 1419 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 1420 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 1421 IsReductionWithTaskMod = 1422 IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task; 1423 } 1424 if (HasAtLeastOneReduction) { 1425 if (IsReductionWithTaskMod) { 1426 CGM.getOpenMPRuntime().emitTaskReductionFini( 1427 *this, D.getBeginLoc(), 1428 isOpenMPWorksharingDirective(D.getDirectiveKind())); 1429 } 1430 bool WithNowait = D.getSingleClause<OMPNowaitClause>() || 1431 isOpenMPParallelDirective(D.getDirectiveKind()) || 1432 ReductionKind == OMPD_simd; 1433 bool SimpleReduction = ReductionKind == OMPD_simd; 1434 // Emit nowait reduction if nowait clause is present or directive is a 1435 // parallel directive (it always has implicit barrier). 1436 CGM.getOpenMPRuntime().emitReduction( 1437 *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps, 1438 {WithNowait, SimpleReduction, ReductionKind}); 1439 } 1440 } 1441 1442 static void emitPostUpdateForReductionClause( 1443 CodeGenFunction &CGF, const OMPExecutableDirective &D, 1444 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1445 if (!CGF.HaveInsertPoint()) 1446 return; 1447 llvm::BasicBlock *DoneBB = nullptr; 1448 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) { 1449 if (const Expr *PostUpdate = C->getPostUpdateExpr()) { 1450 if (!DoneBB) { 1451 if (llvm::Value *Cond = CondGen(CGF)) { 1452 // If the first post-update expression is found, emit conditional 1453 // block if it was requested. 1454 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu"); 1455 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done"); 1456 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1457 CGF.EmitBlock(ThenBB); 1458 } 1459 } 1460 CGF.EmitIgnoredExpr(PostUpdate); 1461 } 1462 } 1463 if (DoneBB) 1464 CGF.EmitBlock(DoneBB, /*IsFinished=*/true); 1465 } 1466 1467 namespace { 1468 /// Codegen lambda for appending distribute lower and upper bounds to outlined 1469 /// parallel function. 
/// This is necessary for combined constructs such as
/// 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  if (CGF.getLangOpts().OpenMP < 50)
    return;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  // Privates should not be analyzed since they are not captured at all.
  // Task reductions may be skipped - tasks are ignored.
  // Firstprivates do not return a value but may be passed by reference - no
  // need to check for updated lastprivate conditional.
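  // Firstprivates below are still recorded in PrivateDecls (without emitting
  // a check) so the shared-update analysis at the end of this function can
  // exclude them; this note is explanatory only.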
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    }
  }
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
      CGF, S, PrivateDecls);
}

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    llvm::Value *NumThreads =
        CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                           /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
  // lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static bool isAllocatableDecl(const VarDecl *VD) {
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  // Use the default allocation.
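  // e.g. (illustrative user code only): a variable declared with
  //   #pragma omp allocate(v) allocator(omp_default_mem_alloc)
  // keeps the ordinary alloca-based emission, so it is not treated as
  // "allocatable" here.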
  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
            AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
           !AA->getAllocator());
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
    CodeGenFunction &CGF, const VarDecl *VD) {
  CodeGenModule &CGM = CGF.CGM;
  auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  if (!VD)
    return Address::invalid();
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!isAllocatableDecl(CVD))
    return Address::invalid();
  llvm::Value *Size;
  CharUnits Align = CGM.getContext().getDeclAlign(CVD);
  if (CVD->getType()->isVariablyModifiedType()) {
    Size = CGF.getTypeSize(CVD->getType());
    // Align the size: ((size + align - 1) / align) * align
    Size = CGF.Builder.CreateNUWAdd(
        Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
    Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
    Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
  } else {
    CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
    Size = CGM.getSize(Sz.alignTo(Align));
  }

  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  assert(AA->getAllocator() &&
         "Expected allocator expression for non-default allocator.");
  llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
  // According to the standard, the original allocator type is an enum
  // (integer). Convert to pointer type, if required.
  if (Allocator->getType()->isIntegerTy())
    Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
  else if (Allocator->getType()->isPointerTy())
    Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
                                                                CGM.VoidPtrTy);

  llvm::Value *Addr = OMPBuilder.createOMPAlloc(
      CGF.Builder, Size, Allocator,
      getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
  llvm::CallInst *FreeCI =
      OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);

  CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr,
      CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
      getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
  return Address(Addr, Align);
}

Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
    CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
    SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return VDAddr;

  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  llvm::Type *VarTy = VDAddr.getElementType();
  llvm::Value *Data =
      CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
  llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
  std::string Suffix = getNameWithSeparators({"cache", ""});
  llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);

  llvm::CallInst *ThreadPrivateCacheCall =
      OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);

  return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
}

std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
    ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
  SmallString<128> Buffer;
  llvm::raw_svector_ostream OS(Buffer);
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
    OS << Sep << Part;
    Sep = Separator;
  }
  return OS.str().str();
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    // Check if we have any if clause associated with the directive.
    llvm::Value *IfCond = nullptr;
    if (const auto *C = S.getSingleClause<OMPIfClause>())
      IfCond = EmitScalarExpr(C->getCondition(),
                              /*IgnoreResultAssign=*/true);

    llvm::Value *NumThreads = nullptr;
    if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
      NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                  /*IgnoreResultAssign=*/true);

    ProcBindKind ProcBind = OMP_PROC_BIND_default;
    if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
      ProcBind = ProcBindClause->getProcBindKind();

    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    // The cleanup callback that finalizes all variables at the given location,
    // thus calls destructors etc.
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    // Privatization callback that performs appropriate action for
    // shared/private/firstprivate/lastprivate/copyin/... variables.
    //
    // TODO: This defaults to shared right now.
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                     llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
      // The next line is appropriate only for variables (Val) with the
      // data-sharing attribute "shared".
      ReplVal = &Val;

      return CodeGenIP;
    };

    const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
    const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();

    auto BodyGenCB = [ParallelRegionBodyStmt,
                      this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                            llvm::BasicBlock &ContinuationBB) {
      OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
                                                      ContinuationBB);
      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
                                             CodeGenIP, ContinuationBB);
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
        AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
    Builder.restoreIP(
        OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
                                  IfCond, NumThreads, ProcBind, S.hasCancel()));
    return;
  }

  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // propagation of the master thread's values of threadprivate variables
      // to local instances of those variables in all other implicit threads.
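      // ('copyin' appears in source as, e.g. (illustrative only):
      //    #pragma omp parallel copyin(tp)
      //  where 'tp' is a threadprivate variable.)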
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

namespace {
/// RAII to handle scopes for loop transformation directives.
class OMPTransformDirectiveScopeRAII {
  OMPLoopScope *Scope = nullptr;
  CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
  CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;

public:
  OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
    if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
      Scope = new OMPLoopScope(CGF, *Dir);
      CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
      CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
    }
  }
  ~OMPTransformDirectiveScopeRAII() {
    if (!Scope)
      return;
    delete CapInfoRAII;
    delete CGSI;
    delete Scope;
  }
};
} // namespace

static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
                     int MaxLevel, int Level = 0) {
  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
  const Stmt *SimplifiedS = S->IgnoreContainers();
  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
    PrettyStackTraceLoc CrashInfo(
        CGF.getContext().getSourceManager(), CS->getLBracLoc(),
        "LLVM IR generation of compound statement ('{}')");

    // Keep track of the current cleanup stack depth, including debug scopes.
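    // (The LexicalScope below provides that bookkeeping; it is cleaned up
    // when the compound statement ends.)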
    CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
    for (const Stmt *CurStmt : CS->body())
      emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
    return;
  }
  if (SimplifiedS == NextLoop) {
    OMPTransformDirectiveScopeRAII PossiblyTransformDirectiveScope(CGF,
                                                                   SimplifiedS);
    if (auto *Dir = dyn_cast<OMPTileDirective>(SimplifiedS))
      SimplifiedS = Dir->getTransformedStmt();
    if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
      SimplifiedS = CanonLoop->getLoopStmt();
    if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
      S = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(SimplifiedS) &&
             "Expected canonical for loop or range-based for loop.");
      const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
      CGF.EmitStmt(CXXFor->getLoopVarStmt());
      S = CXXFor->getBody();
    }
    if (Level + 1 < MaxLevel) {
      NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
          S, /*TryImperfectlyNestedLoops=*/true);
      emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
      return;
    }
  }
  CGF.EmitStmt(S);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counter values on the current iteration.
  for (const Expr *UE : D.updates())
    EmitIgnoredExpr(UE);
  // Update the linear variables.
  // In distribute directives only loop counters may be marked as linear, no
  // need to generate the code for them.
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
      for (const Expr *UE : C->updates())
        EmitIgnoredExpr(UE);
    }
  }

  // On a continue in the body, jump to the end.
  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  for (const Expr *E : D.finals_conditions()) {
    if (!E)
      continue;
    // Check that loop counter in non-rectangular nest fits into the iteration
    // space.
    llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
    EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
                         getProfileCount(D.getBody()));
    EmitBlock(NextBB);
  }

  OMPPrivateScope InscanScope(*this);
  EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
  bool IsInscanRegion = InscanScope.Privatize();
  if (IsInscanRegion) {
    // Need to remember the block before and after scan directive
    // to dispatch them correctly depending on the clause used in
    // this directive, inclusive or exclusive. For inclusive scan the natural
    // order of the blocks is used, for exclusive clause the blocks must be
    // executed in reverse order.
    OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
    OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
    // No need to allocate inscan exit block; in simd mode it is selected in
    // the codegen for the scan directive.
    if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
      OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
    OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
    EmitBranch(OMPScanDispatch);
    EmitBlock(OMPBeforeScanBlock);
  }

  // Emit loop variables for C++ range loops.
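  // (For 'for (auto x : r)' the loop variable 'x' itself is emitted by
  // emitBody via CXXForRangeStmt::getLoopVarStmt; illustrative note only.)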
  const Stmt *Body =
      D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
  // Emit loop body.
  emitBody(*this, Body,
           OMPLoopBasedDirective::tryToFindNextInnerLoop(
               Body, /*TryImperfectlyNestedLoops=*/true),
           D.getLoopsNumber());

  // Jump to the dispatcher at the end of the loop body.
  if (IsInscanRegion)
    EmitBranch(OMPScanExitBlock);

  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;

/// Emit a captured statement and return the function as well as its captured
/// closure context.
static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
                                             const CapturedStmt *S) {
  LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
  CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
  std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
      std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);

  return {F, CapStruct.getPointer(ParentCGF)};
}

/// Emit a call to a previously captured closure.
static llvm::CallInst *
emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
                     llvm::ArrayRef<llvm::Value *> Args) {
  // Append the closure context to the argument list.
  SmallVector<llvm::Value *> EffectiveArgs;
  EffectiveArgs.reserve(Args.size() + 1);
  llvm::append_range(EffectiveArgs, Args);
  EffectiveArgs.push_back(Cap.second);

  return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
}

llvm::CanonicalLoopInfo *
CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
  assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");

  EmitStmt(S);
  assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");

  // The last added loop is the outermost one.
  return OMPLoopNestStack.back();
}

void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
  const Stmt *SyntacticalLoop = S->getLoopStmt();
  if (!getLangOpts().OpenMPIRBuilder) {
    // Ignore if OpenMPIRBuilder is not enabled.
    EmitStmt(SyntacticalLoop);
    return;
  }

  LexicalScope ForScope(*this, S->getSourceRange());

  // Emit init statements. The Distance/LoopVar funcs may reference variable
  // declarations they contain.
  const Stmt *BodyStmt;
  if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
    if (const Stmt *InitStmt = For->getInit())
      EmitStmt(InitStmt);
    BodyStmt = For->getBody();
  } else if (const auto *RangeFor =
                 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
    if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
      EmitStmt(RangeStmt);
    if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
      EmitStmt(BeginStmt);
    if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
      EmitStmt(EndStmt);
    if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
      EmitStmt(LoopVarStmt);
    BodyStmt = RangeFor->getBody();
  } else
    llvm_unreachable("Expected for-stmt or range-based for-stmt");

  // Emit closure for later use. By-value captures will be captured here.
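  // (Conceptually, for a loop like 'for (i = 0; i < n; i += 2)' the distance
  // closure computes the trip count, roughly ceil(n / 2) for positive n;
  // illustrative sketch only.)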
  const CapturedStmt *DistanceFunc = S->getDistanceFunc();
  EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
  const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
  EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);

  // Call the distance function to get the number of iterations of the loop to
  // come.
  QualType LogicalTy = DistanceFunc->getCapturedDecl()
                           ->getParam(0)
                           ->getType()
                           .getNonReferenceType();
  Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
  emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
  llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");

  // Emit the loop structure.
  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
                           llvm::Value *IndVar) {
    Builder.restoreIP(CodeGenIP);

    // Emit the loop body: Convert the logical iteration number to the loop
    // variable and emit the body.
    const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
    LValue LCVal = EmitLValue(LoopVarRef);
    Address LoopVarAddress = LCVal.getAddress(*this);
    emitCapturedStmtCall(*this, LoopVarClosure,
                         {LoopVarAddress.getPointer(), IndVar});

    RunCleanupsScope BodyScope(*this);
    EmitStmt(BodyStmt);
  };
  llvm::CanonicalLoopInfo *CL =
      OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);

  // Finish up the loop.
  Builder.restoreIP(CL->getAfterIP());
  ForScope.ForceCleanup();

  // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
  OMPLoopNestStack.push_back(CL);
}

void CodeGenFunction::EmitOMPInnerLoop(
    const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();

  // If attributes are attached, push to the basic block with them.
  const auto &OMPED = cast<OMPExecutableDirective>(S);
  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
  const Stmt *SS = ICS->getCapturedStmt();
  const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
  OMPLoopNestStack.clear();
  if (AS)
    LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
                   AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
                   SourceLocToDebugLoc(R.getEnd()));
  else
    LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                   SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
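  // The block structure created below is roughly (sketch only):
  //   omp.inner.for.cond: br LoopCond, omp.inner.for.body, <exit>
  //   omp.inner.for.body: BODY; br omp.inner.for.inc
  //   omp.inner.for.inc:  IV += 1; br omp.inner.for.cond
  //   omp.inner.for.end:  fall-through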
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // Emit inits for the linear variables.
  bool HasLinears = false;
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Init : C->inits()) {
      HasLinears = true;
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (const auto *Ref =
              dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        VD->getInit()->getType(), VK_LValue,
                        VD->getInit()->getExprLoc());
        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
                                                VD->getType()),
                       /*capturedByInit=*/false);
        EmitAutoVarCleanups(Emission);
      } else {
        EmitVarDecl(*VD);
      }
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
  return HasLinears;
}

void CodeGenFunction::EmitOMPLinearClauseFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (const Expr *F : C->finals()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
          DoneBB = createBasicBlock(".omp.linear.pu.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    llvm::APInt ClauseAlignment(64, 0);
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = AlignmentCI->getValue();
    }
    for (const Expr *E : Clause->varlists()) {
      llvm::APInt Alignment(ClauseAlignment);
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || Alignment.isPowerOf2()) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.emitAlignmentAssumption(
            PtrValue, E, /*No second loc needed*/ SourceLocation(),
            llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
      }
    }
  }
}

void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (const Expr *E : S.counters()) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    // Emit var without initialization.
    AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
    EmitAutoVarCleanups(VarEmission);
    LocalDeclMap.erase(PrivateVD);
    (void)LoopScope.addPrivate(VD, [&VarEmission]() {
      return VarEmission.getAllocatedAddress();
    });
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                        E->getType(), VK_LValue, E->getExprLoc());
        return EmitLValue(&DRE).getAddress(*this);
      });
    } else {
      (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
        return VarEmission.getAllocatedAddress();
      });
    }
    ++I;
  }
  // Privatize extra loop counters used in loops for ordered(n) clauses.
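  // e.g. (illustrative only): '#pragma omp for ordered(2)' on a single
  // associated loop has one collapsed counter but two ordered-loop counters;
  // the extra one is privatized below.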
  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
    if (!C->getNumForLoops())
      continue;
    for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
         I < E; ++I) {
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      // Override only those variables that can be captured to avoid
      // re-emission of the variables declared within the loops.
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
          return CreateMemTemp(DRE->getType(), VD->getName());
        });
      }
    }
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (const Expr *I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Create temp loop control variables with their init values to support
  // non-rectangular loops.
  CodeGenFunction::OMPMapVars PreCondVars;
  for (const Expr *E : S.dependent_counters()) {
    if (!E)
      continue;
    assert(!E->getType().getNonReferenceType()->isRecordType() &&
           "dependent counter must not be an iterator.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Address CounterAddr =
        CGF.CreateMemTemp(VD->getType().getNonReferenceType());
    (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
  }
  (void)PreCondVars.apply(CGF);
  for (const Expr *E : S.dependent_inits()) {
    if (!E)
      continue;
    CGF.EmitIgnoredExpr(E);
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
  PreCondVars.restore(CGF);
}

void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (const Expr *E : C->varlists()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
          // Emit private VarDecl with copy init.
          EmitVarDecl(*PrivateVD);
          return GetAddrOfLocalVar(PrivateVD);
        });
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else {
        EmitVarDecl(*PrivateVD);
      }
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(/*Enable=*/false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable();
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
  if (const auto *C = D.getSingleClause<OMPOrderClause>())
    if (C->getKind() == OMPC_ORDER_concurrent)
      LoopStack.setParallel(/*Enable=*/true);
  if ((D.getDirectiveKind() == OMPD_simd ||
       (getLangOpts().OpenMPSimd &&
        isOpenMPSimdDirective(D.getDirectiveKind()))) &&
      llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
                   [](const OMPReductionClause *C) {
                     return C->getModifier() == OMPC_REDUCTION_inscan;
                   }))
    // Disable parallel access in case of prefix sum.
    LoopStack.setParallel(/*Enable=*/false);
}

void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
          DoneBB = createBasicBlock(".omp.final.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      Address OrigAddr = Address::invalid();
      if (CED) {
        OrigAddr =
            EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
      } else {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
                        /*RefersToEnclosingVariableOrCapture=*/false,
                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
        OrigAddr = EmitLValue(&DRE).getAddress(*this);
      }
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
    ++IPC;
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
                                         const OMPLoopDirective &S,
                                         CodeGenFunction::JumpDest LoopExit) {
  CGF.EmitOMPLoopBody(S, LoopExit);
  CGF.EmitStopPoint(&S);
}

/// Emit a helper variable and return the corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
                               const RegionCodeGenTy &SimdInitGen,
                               const RegionCodeGenTy &BodyCodeGen) {
  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    SimdInitGen(CGF);

    BodyCodeGen(CGF);
  };
  auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);

    BodyCodeGen(CGF);
  };
  const Expr *IfCond = nullptr;
  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
    for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
      if (CGF.getLangOpts().OpenMP >= 50 &&
          (C->getNameModifier() == OMPD_unknown ||
           C->getNameModifier() == OMPD_simd)) {
        IfCond = C->getCondition();
        break;
      }
    }
  }
  if (IfCond) {
    CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}

static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
                              PrePostActionTy &Action) {
  Action.Enter(CGF);
  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
         "Expected simd directive");
  OMPLoopScope PreInitScope(CGF, S);
  // if (PreCond) {
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  // }
  //
  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
      isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
      isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
  }

  // Emit: if (PreCond) - begin.
  // If the condition constant folds and can be elided, avoid emitting the
  // whole loop.
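  // e.g. (illustrative only): for '#pragma omp simd' on
  //   for (int i = 0; i < 0; ++i) ...
  // the precondition folds to false and the loop is not emitted at all.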
  bool CondConstant;
  llvm::BasicBlock *ContBlock = nullptr;
  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
    if (!CondConstant)
      return;
  } else {
    llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
    ContBlock = CGF.createBasicBlock("simd.if.end");
    emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                CGF.getProfileCount(&S));
    CGF.EmitBlock(ThenBlock);
    CGF.incrementProfileCounter(&S);
  }

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  CGF.EmitVarDecl(*IVDecl);
  CGF.EmitIgnoredExpr(S.getInit());

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    CGF.EmitIgnoredExpr(S.getCalcLastIteration());
  }

  emitAlignedClause(CGF, S);
  (void)CGF.EmitOMPLinearClauseInit(S);
  {
    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
        CGF, S, CGF.EmitLValue(S.getIterationVariable()));
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    emitCommonSimdLoop(
        CGF, S,
        [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPSimdInit(S);
        },
        [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPInnerLoop(
              S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
              [&S](CodeGenFunction &CGF) {
                emitOMPLoopBodyWithStopPoint(CGF, S,
                                             CodeGenFunction::JumpDest());
              },
              [](CodeGenFunction &) {});
        });
    CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
    // Emit final copy of the lastprivate variables at the end of loops.
    if (HasLastprivateClause)
      CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
    emitPostUpdateForReductionClause(CGF, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
  // Emit: if (PreCond) - end.
  if (ContBlock) {
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
  OMPFirstScanLoop = true;
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
  // Emit the de-sugared statement.
  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
  EmitStmt(S.getTransformedStmt());
}

void CodeGenFunction::EmitOMPOuterLoop(
    bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
    CodeGenFunction::OMPPrivateScope &LoopScope,
    const CodeGenFunction::OMPLoopArguments &LoopArgs,
    const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
    const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();
  OMPLoopNestStack.clear();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB) or
    // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
    // 'distribute parallel for')
    EmitIgnoredExpr(LoopArgs.EUB);
    // IV = LB
    EmitIgnoredExpr(LoopArgs.Init);
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
  } else {
    BoolCondVal =
        RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
                       LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(LoopArgs.Init);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  emitCommonSimdLoop(
      *this, S,
      [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
        // Generate !llvm.loop.parallel metadata for loads and stores for loops
        // with dynamic/guided scheduling and without ordered clause.
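        // (Concretely, memory accesses in the loop may get access-group
        // metadata referenced from the loop's parallel-accesses metadata, so
        // the vectorizer need not prove independence; explanatory note only.)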
        if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
          CGF.LoopStack.setParallel(!IsMonotonic);
          if (const auto *C = S.getSingleClause<OMPOrderClause>())
            if (C->getKind() == OMPC_ORDER_concurrent)
              CGF.LoopStack.setParallel(/*Enable=*/true);
        } else {
          CGF.EmitOMPSimdInit(S, IsMonotonic);
        }
      },
      [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
       &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
        SourceLocation Loc = S.getBeginLoc();
        // When 'distribute' is not combined with a 'for':
        //   while (idx <= UB) { BODY; ++idx; }
        // When 'distribute' is combined with a 'for'
        // (e.g. 'distribute parallel for'):
        //   while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
        CGF.EmitOMPInnerLoop(
            S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
            [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
              CodeGenLoop(CGF, S, LoopExit);
            },
            [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
              CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
            });
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(LoopArgs.NextLB);
    EmitIgnoredExpr(LoopArgs.NextUB);
  }

  EmitBranch(CondBlock);
  OMPLoopNestStack.clear();
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
    if (!DynamicOrOrdered)
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
  };
  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(
    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    const OMPLoopArguments &LoopArgs,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);

  assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
                                            LoopArgs.Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned.
  // For a chunk_size of 1, the size of each chunk is proportional to the
  // number of unassigned iterations divided by the number of threads in the
  // team, decreasing to 1. For a chunk_size with value k (greater than 1),
  // the size of each chunk is determined in the same way, with the
  // restriction that the chunks do not contain fewer than k iterations
  // (except for the last chunk to be assigned, which may have fewer than k
  // iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
    CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
                                                              LoopArgs.Chunk};
    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
                           IVSigned, Ordered, DispatchRTInputValues);
  } else {
    CGOpenMPRuntime::StaticRTInput StaticInit(
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);
    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                         ScheduleKind, StaticInit);
  }

  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
                                    const unsigned IVSize,
                                    const bool IVSigned) {
    if (Ordered) {
      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
                                                            IVSigned);
    }
  };

  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}

static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}

void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
    const CodeGenLoopTy &CodeGenLoopContent) {

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Emit outer loop.
  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
  // dynamic.
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  CGOpenMPRuntime::StaticRTInput StaticInit(
      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);

  // For combined 'distribute' and 'for', the increment expression of
  // distribute is stored in DistInc. For 'distribute' alone, it is in Inc.
  Expr *IncExpr;
  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
    IncExpr = S.getDistInc();
  else
    IncExpr = S.getInc();

  // This routine is shared by 'omp distribute parallel for' and
  // 'omp distribute': select the right EUB expression depending on the
  // directive.
  OMPLoopArguments OuterLoopArgs;
  OuterLoopArgs.LB = LoopArgs.LB;
  OuterLoopArgs.UB = LoopArgs.UB;
  OuterLoopArgs.ST = LoopArgs.ST;
  OuterLoopArgs.IL = LoopArgs.IL;
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedEnsureUpperBound()
                          : S.getEnsureUpperBound();
  OuterLoopArgs.IncExpr = IncExpr;
  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedInit()
                           : S.getInit();
  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedCond()
                           : S.getCond();
  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextLowerBound()
                             : S.getNextLowerBound();
  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextUpperBound()
                             : S.getNextUpperBound();

  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
                   LoopScope, OuterLoopArgs, CodeGenLoopContent,
                   emitEmptyOrdered);
}

static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));

  // When composing 'distribute' with 'for' (e.g. as in 'distribute parallel
  // for') we need to use the 'distribute' chunk lower and upper bounds rather
  // than the whole loop iteration space. These are parameters to the outlined
  // function for 'parallel' and we copy the bounds of the previous schedule
  // into the current ones.
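  // (Schematically: LB = PrevLB and UB = PrevUB, with integer conversions to
  // the iteration variable type, as emitted below.)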
2872   LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2873   LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2874   llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2875       PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2876   PrevLBVal = CGF.EmitScalarConversion(
2877       PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2878       LS.getIterationVariable()->getType(),
2879       LS.getPrevLowerBoundVariable()->getExprLoc());
2880   llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2881       PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2882   PrevUBVal = CGF.EmitScalarConversion(
2883       PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2884       LS.getIterationVariable()->getType(),
2885       LS.getPrevUpperBoundVariable()->getExprLoc());
2886
2887   CGF.EmitStoreOfScalar(PrevLBVal, LB);
2888   CGF.EmitStoreOfScalar(PrevUBVal, UB);
2889
2890   return {LB, UB};
2891 }
2892
2893 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), then
2894 /// we need to use the LB and UB expressions generated by the worksharing
2895 /// code generation support, whereas in non-combined situations we would
2896 /// just emit 0 and the LastIteration expression.
2897 /// This function is necessary due to the difference in the LB and UB
2898 /// types for the RT emission routines for 'for_static_init' and
2899 /// 'for_dispatch_init'.
2900 static std::pair<llvm::Value *, llvm::Value *>
2901 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2902                                         const OMPExecutableDirective &S,
2903                                         Address LB, Address UB) {
2904   const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2905   const Expr *IVExpr = LS.getIterationVariable();
2906   // When implementing a dynamic schedule for a 'for' combined with a
2907   // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2908   // is not normalized as each team only executes its own assigned
2909   // distribute chunk.
2910   QualType IteratorTy = IVExpr->getType();
2911   llvm::Value *LBVal =
2912       CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2913   llvm::Value *UBVal =
2914       CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2915   return {LBVal, UBVal};
2916 }
2917
2918 static void emitDistributeParallelForDistributeInnerBoundParams(
2919     CodeGenFunction &CGF, const OMPExecutableDirective &S,
2920     llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2921   const auto &Dir = cast<OMPLoopDirective>(S);
2922   LValue LB =
2923       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2924   llvm::Value *LBCast =
2925       CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
2926                                 CGF.SizeTy, /*isSigned=*/false);
2927   CapturedVars.push_back(LBCast);
2928   LValue UB =
2929       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2930
2931   llvm::Value *UBCast =
2932       CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
2933                                 CGF.SizeTy, /*isSigned=*/false);
2934   CapturedVars.push_back(UBCast);
2935 }
2936
2937 static void
2938 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2939                                  const OMPLoopDirective &S,
2940                                  CodeGenFunction::JumpDest LoopExit) {
2941   auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2942                                          PrePostActionTy &Action) {
2943     Action.Enter(CGF);
2944     bool HasCancel = false;
2945     if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2946       if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2947         HasCancel = D->hasCancel();
2948       else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2949         HasCancel =
D->hasCancel(); 2950 else if (const auto *D = 2951 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2952 HasCancel = D->hasCancel(); 2953 } 2954 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2955 HasCancel); 2956 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2957 emitDistributeParallelForInnerBounds, 2958 emitDistributeParallelForDispatchBounds); 2959 }; 2960 2961 emitCommonOMPParallelDirective( 2962 CGF, S, 2963 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2964 CGInlinedWorksharingLoop, 2965 emitDistributeParallelForDistributeInnerBoundParams); 2966 } 2967 2968 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2969 const OMPDistributeParallelForDirective &S) { 2970 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2971 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2972 S.getDistInc()); 2973 }; 2974 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2975 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2976 } 2977 2978 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2979 const OMPDistributeParallelForSimdDirective &S) { 2980 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2981 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2982 S.getDistInc()); 2983 }; 2984 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2985 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2986 } 2987 2988 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2989 const OMPDistributeSimdDirective &S) { 2990 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2991 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2992 }; 2993 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2994 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2995 } 2996 2997 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2998 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2999 // Emit SPMD target parallel for region as a standalone region. 3000 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3001 emitOMPSimdRegion(CGF, S, Action); 3002 }; 3003 llvm::Function *Fn; 3004 llvm::Constant *Addr; 3005 // Emit target region as a standalone region. 3006 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 3007 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 3008 assert(Fn && Addr && "Target device function emission failed."); 3009 } 3010 3011 void CodeGenFunction::EmitOMPTargetSimdDirective( 3012 const OMPTargetSimdDirective &S) { 3013 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3014 emitOMPSimdRegion(CGF, S, Action); 3015 }; 3016 emitCommonOMPTargetDirective(*this, S, CodeGen); 3017 } 3018 3019 namespace { 3020 struct ScheduleKindModifiersTy { 3021 OpenMPScheduleClauseKind Kind; 3022 OpenMPScheduleClauseModifier M1; 3023 OpenMPScheduleClauseModifier M2; 3024 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 3025 OpenMPScheduleClauseModifier M1, 3026 OpenMPScheduleClauseModifier M2) 3027 : Kind(Kind), M1(M1), M2(M2) {} 3028 }; 3029 } // namespace 3030 3031 bool CodeGenFunction::EmitOMPWorksharingLoop( 3032 const OMPLoopDirective &S, Expr *EUB, 3033 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 3034 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 3035 // Emit the loop iteration variable. 
3036 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3037 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3038 EmitVarDecl(*IVDecl); 3039 3040 // Emit the iterations count variable. 3041 // If it is not a variable, Sema decided to calculate iterations count on each 3042 // iteration (e.g., it is foldable into a constant). 3043 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3044 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3045 // Emit calculation of the iterations count. 3046 EmitIgnoredExpr(S.getCalcLastIteration()); 3047 } 3048 3049 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 3050 3051 bool HasLastprivateClause; 3052 // Check pre-condition. 3053 { 3054 OMPLoopScope PreInitScope(*this, S); 3055 // Skip the entire loop if we don't meet the precondition. 3056 // If the condition constant folds and can be elided, avoid emitting the 3057 // whole loop. 3058 bool CondConstant; 3059 llvm::BasicBlock *ContBlock = nullptr; 3060 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3061 if (!CondConstant) 3062 return false; 3063 } else { 3064 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 3065 ContBlock = createBasicBlock("omp.precond.end"); 3066 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3067 getProfileCount(&S)); 3068 EmitBlock(ThenBlock); 3069 incrementProfileCounter(&S); 3070 } 3071 3072 RunCleanupsScope DoacrossCleanupScope(*this); 3073 bool Ordered = false; 3074 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 3075 if (OrderedClause->getNumForLoops()) 3076 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 3077 else 3078 Ordered = true; 3079 } 3080 3081 llvm::DenseSet<const Expr *> EmittedFinals; 3082 emitAlignedClause(*this, S); 3083 bool HasLinears = EmitOMPLinearClauseInit(S); 3084 // Emit helper vars inits. 3085 3086 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 3087 LValue LB = Bounds.first; 3088 LValue UB = Bounds.second; 3089 LValue ST = 3090 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3091 LValue IL = 3092 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3093 3094 // Emit 'then' code. 3095 { 3096 OMPPrivateScope LoopScope(*this); 3097 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 3098 // Emit implicit barrier to synchronize threads and avoid data races on 3099 // initialization of firstprivate variables and post-update of 3100 // lastprivate variables. 3101 CGM.getOpenMPRuntime().emitBarrierCall( 3102 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3103 /*ForceSimpleCall=*/true); 3104 } 3105 EmitOMPPrivateClause(S, LoopScope); 3106 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 3107 *this, S, EmitLValue(S.getIterationVariable())); 3108 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3109 EmitOMPReductionClauseInit(S, LoopScope); 3110 EmitOMPPrivateLoopCounters(S, LoopScope); 3111 EmitOMPLinearClause(S, LoopScope); 3112 (void)LoopScope.Privatize(); 3113 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3114 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 3115 3116 // Detect the loop schedule kind and chunk. 
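      // For instance (hedged example, not tied to a specific test case):
      // 'schedule(dynamic, 4)' yields Schedule == OMPC_SCHEDULE_dynamic with
      // ChunkExpr == '4', while a loop without a schedule clause falls back
      // to the implementation default queried from the runtime below.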
3117 const Expr *ChunkExpr = nullptr; 3118 OpenMPScheduleTy ScheduleKind; 3119 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 3120 ScheduleKind.Schedule = C->getScheduleKind(); 3121 ScheduleKind.M1 = C->getFirstScheduleModifier(); 3122 ScheduleKind.M2 = C->getSecondScheduleModifier(); 3123 ChunkExpr = C->getChunkSize(); 3124 } else { 3125 // Default behaviour for schedule clause. 3126 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 3127 *this, S, ScheduleKind.Schedule, ChunkExpr); 3128 } 3129 bool HasChunkSizeOne = false; 3130 llvm::Value *Chunk = nullptr; 3131 if (ChunkExpr) { 3132 Chunk = EmitScalarExpr(ChunkExpr); 3133 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 3134 S.getIterationVariable()->getType(), 3135 S.getBeginLoc()); 3136 Expr::EvalResult Result; 3137 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 3138 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 3139 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 3140 } 3141 } 3142 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3143 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3144 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 3145 // If the static schedule kind is specified or if the ordered clause is 3146 // specified, and if no monotonic modifier is specified, the effect will 3147 // be as if the monotonic modifier was specified. 3148 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 3149 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 3150 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 3151 bool IsMonotonic = 3152 Ordered || 3153 ((ScheduleKind.Schedule == OMPC_SCHEDULE_static || 3154 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) && 3155 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic || 3156 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) || 3157 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 3158 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 3159 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 3160 /* Chunked */ Chunk != nullptr) || 3161 StaticChunkedOne) && 3162 !Ordered) { 3163 JumpDest LoopExit = 3164 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3165 emitCommonSimdLoop( 3166 *this, S, 3167 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 3168 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3169 CGF.EmitOMPSimdInit(S, IsMonotonic); 3170 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 3171 if (C->getKind() == OMPC_ORDER_concurrent) 3172 CGF.LoopStack.setParallel(/*Enable=*/true); 3173 } 3174 }, 3175 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 3176 &S, ScheduleKind, LoopExit, 3177 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 3178 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 3179 // When no chunk_size is specified, the iteration space is divided 3180 // into chunks that are approximately equal in size, and at most 3181 // one chunk is distributed to each thread. Note that the size of 3182 // the chunks is unspecified in this case. 3183 CGOpenMPRuntime::StaticRTInput StaticInit( 3184 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 3185 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 3186 StaticChunkedOne ? 
Chunk : nullptr); 3187 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3188 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 3189 StaticInit); 3190 // UB = min(UB, GlobalUB); 3191 if (!StaticChunkedOne) 3192 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 3193 // IV = LB; 3194 CGF.EmitIgnoredExpr(S.getInit()); 3195 // For unchunked static schedule generate: 3196 // 3197 // while (idx <= UB) { 3198 // BODY; 3199 // ++idx; 3200 // } 3201 // 3202 // For static schedule with chunk one: 3203 // 3204 // while (IV <= PrevUB) { 3205 // BODY; 3206 // IV += ST; 3207 // } 3208 CGF.EmitOMPInnerLoop( 3209 S, LoopScope.requiresCleanups(), 3210 StaticChunkedOne ? S.getCombinedParForInDistCond() 3211 : S.getCond(), 3212 StaticChunkedOne ? S.getDistInc() : S.getInc(), 3213 [&S, LoopExit](CodeGenFunction &CGF) { 3214 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 3215 }, 3216 [](CodeGenFunction &) {}); 3217 }); 3218 EmitBlock(LoopExit.getBlock()); 3219 // Tell the runtime we are done. 3220 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3221 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3222 S.getDirectiveKind()); 3223 }; 3224 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 3225 } else { 3226 // Emit the outer loop, which requests its work chunk [LB..UB] from 3227 // runtime and runs the inner loop to process it. 3228 const OMPLoopArguments LoopArguments( 3229 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 3230 IL.getAddress(*this), Chunk, EUB); 3231 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 3232 LoopArguments, CGDispatchBounds); 3233 } 3234 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 3235 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 3236 return CGF.Builder.CreateIsNotNull( 3237 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3238 }); 3239 } 3240 EmitOMPReductionClauseFinal( 3241 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 3242 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 3243 : /*Parallel only*/ OMPD_parallel); 3244 // Emit post-update of the reduction variables if IsLastIter != 0. 3245 emitPostUpdateForReductionClause( 3246 *this, S, [IL, &S](CodeGenFunction &CGF) { 3247 return CGF.Builder.CreateIsNotNull( 3248 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3249 }); 3250 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3251 if (HasLastprivateClause) 3252 EmitOMPLastprivateClauseFinal( 3253 S, isOpenMPSimdDirective(S.getDirectiveKind()), 3254 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 3255 } 3256 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 3257 return CGF.Builder.CreateIsNotNull( 3258 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3259 }); 3260 DoacrossCleanupScope.ForceCleanup(); 3261 // We're now done with the loop, so jump to the continuation block. 3262 if (ContBlock) { 3263 EmitBranch(ContBlock); 3264 EmitBlock(ContBlock, /*IsFinished=*/true); 3265 } 3266 } 3267 return HasLastprivateClause; 3268 } 3269 3270 /// The following two functions generate expressions for the loop lower 3271 /// and upper bounds in case of static and dynamic (dispatch) schedule 3272 /// of the associated 'for' or 'distribute' loop. 
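/// Informally: with 'schedule(static)' the bounds passed to the runtime call
/// are the LB/UB helper variables, whereas for a dispatch schedule on a
/// standalone 'for' the bounds degenerate to the constants 0 and
/// LastIteration (see emitDispatchForLoopBounds below).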
3273 static std::pair<LValue, LValue>
3274 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3275   const auto &LS = cast<OMPLoopDirective>(S);
3276   LValue LB =
3277       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3278   LValue UB =
3279       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3280   return {LB, UB};
3281 }
3282
3283 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3284 /// consider the lower and upper bound expressions generated by the
3285 /// worksharing loop support, but we use 0 and the iteration space size as
3286 /// constants.
3287 static std::pair<llvm::Value *, llvm::Value *>
3288 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3289                           Address LB, Address UB) {
3290   const auto &LS = cast<OMPLoopDirective>(S);
3291   const Expr *IVExpr = LS.getIterationVariable();
3292   const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3293   llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3294   llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3295   return {LBVal, UBVal};
3296 }
3297
3298 /// Emits the code for the directive with inscan reductions.
3299 /// The code is the following:
3300 /// \code
3301 /// size num_iters = <num_iters>;
3302 /// <type> buffer[num_iters];
3303 /// #pragma omp ...
3304 /// for (i: 0..<num_iters>) {
3305 ///   <input phase>;
3306 ///   buffer[i] = red;
3307 /// }
3308 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3309 ///   for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3310 ///     buffer[cnt] op= buffer[cnt-pow(2,k)];
3311 /// #pragma omp ...
3312 /// for (0..<num_iters>) {
3313 ///   red = InclusiveScan ? buffer[i] : buffer[i-1];
3314 ///   <scan phase>;
3315 /// }
3316 /// \endcode
3317 static void emitScanBasedDirective(
3318     CodeGenFunction &CGF, const OMPLoopDirective &S,
3319     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3320     llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3321     llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3322   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3323       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3324   SmallVector<const Expr *, 4> Shareds;
3325   SmallVector<const Expr *, 4> Privates;
3326   SmallVector<const Expr *, 4> ReductionOps;
3327   SmallVector<const Expr *, 4> LHSs;
3328   SmallVector<const Expr *, 4> RHSs;
3329   SmallVector<const Expr *, 4> CopyOps;
3330   SmallVector<const Expr *, 4> CopyArrayTemps;
3331   SmallVector<const Expr *, 4> CopyArrayElems;
3332   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3333     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3334            "Only inscan reductions are expected.");
3335     Shareds.append(C->varlist_begin(), C->varlist_end());
3336     Privates.append(C->privates().begin(), C->privates().end());
3337     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3338     LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3339     RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3340     CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3341     CopyArrayTemps.append(C->copy_array_temps().begin(),
3342                           C->copy_array_temps().end());
3343     CopyArrayElems.append(C->copy_array_elems().begin(),
3344                           C->copy_array_elems().end());
3345   }
3346   {
3347     // Emit buffers for each reduction variable.
3348     // ReductionCodeGen is required to correctly emit the code for array
3349     // reductions.
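    // E.g. (sketch, assuming a scalar 'float red' reduction variable): for
    // 'reduction(inscan, +: red)' this materializes the temporary
    // 'float buffer[num_iters]' shown in the documentation above, with the
    // VLA size expression bound to OMPScanNumIterations.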
3350     ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3351     unsigned Count = 0;
3352     auto *ITA = CopyArrayTemps.begin();
3353     for (const Expr *IRef : Privates) {
3354       const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3355       // Emit variably modified arrays, used for arrays/array sections
3356       // reductions.
3357       if (PrivateVD->getType()->isVariablyModifiedType()) {
3358         RedCG.emitSharedOrigLValue(CGF, Count);
3359         RedCG.emitAggregateType(CGF, Count);
3360       }
3361       CodeGenFunction::OpaqueValueMapping DimMapping(
3362           CGF,
3363           cast<OpaqueValueExpr>(
3364               cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3365                   ->getSizeExpr()),
3366           RValue::get(OMPScanNumIterations));
3367       // Emit temp buffer.
3368       CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3369       ++ITA;
3370       ++Count;
3371     }
3372   }
3373   CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3374   {
3375     // Emit loop with input phase:
3376     // #pragma omp ...
3377     // for (i: 0..<num_iters>) {
3378     //   <input phase>;
3379     //   buffer[i] = red;
3380     // }
3381     CGF.OMPFirstScanLoop = true;
3382     CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3383     FirstGen(CGF);
3384   }
3385   // Emit prefix reduction:
3386   // for (int k = 0; k != ceil(log2(n)); ++k)
3387   llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3388   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3389   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3390   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3391   llvm::Value *Arg =
3392       CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3393   llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3394   F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3395   LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3396   LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3397   llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3398       OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3399   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3400   CGF.EmitBlock(LoopBB);
3401   auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3402   // size pow2k = 1;
3403   auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3404   Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3405   Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3406   // for (size i = n - 1; i >= pow2k; --i)
3407   //   tmp[i] op= tmp[i-pow2k];
3408   llvm::BasicBlock *InnerLoopBB =
3409       CGF.createBasicBlock("omp.inner.log.scan.body");
3410   llvm::BasicBlock *InnerExitBB =
3411       CGF.createBasicBlock("omp.inner.log.scan.exit");
3412   llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3413   CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3414   CGF.EmitBlock(InnerLoopBB);
3415   auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3416   IVal->addIncoming(NMin1, LoopBB);
3417   {
3418     CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3419     auto *ILHS = LHSs.begin();
3420     auto *IRHS = RHSs.begin();
3421     for (const Expr *CopyArrayElem : CopyArrayElems) {
3422       const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3423       const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3424       Address LHSAddr = Address::invalid();
3425       {
3426         CodeGenFunction::OpaqueValueMapping IdxMapping(
3427             CGF,
3428             cast<OpaqueValueExpr>(
3429                 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3430             RValue::get(IVal));
3431         LHSAddr =
CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3432 } 3433 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3434 Address RHSAddr = Address::invalid(); 3435 { 3436 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3437 CodeGenFunction::OpaqueValueMapping IdxMapping( 3438 CGF, 3439 cast<OpaqueValueExpr>( 3440 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3441 RValue::get(OffsetIVal)); 3442 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3443 } 3444 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3445 ++ILHS; 3446 ++IRHS; 3447 } 3448 PrivScope.Privatize(); 3449 CGF.CGM.getOpenMPRuntime().emitReduction( 3450 CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 3451 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3452 } 3453 llvm::Value *NextIVal = 3454 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3455 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3456 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3457 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3458 CGF.EmitBlock(InnerExitBB); 3459 llvm::Value *Next = 3460 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3461 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3462 // pow2k <<= 1; 3463 llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3464 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3465 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3466 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3467 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3468 CGF.EmitBlock(ExitBB); 3469 3470 CGF.OMPFirstScanLoop = false; 3471 SecondGen(CGF); 3472 } 3473 3474 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3475 const OMPLoopDirective &S, 3476 bool HasCancel) { 3477 bool HasLastprivates; 3478 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3479 [](const OMPReductionClause *C) { 3480 return C->getModifier() == OMPC_REDUCTION_inscan; 3481 })) { 3482 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3483 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3484 OMPLoopScope LoopScope(CGF, S); 3485 return CGF.EmitScalarExpr(S.getNumIterations()); 3486 }; 3487 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3488 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3489 CGF, S.getDirectiveKind(), HasCancel); 3490 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3491 emitForLoopBounds, 3492 emitDispatchForLoopBounds); 3493 // Emit an implicit barrier at the end. 
3494 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3495 OMPD_for); 3496 }; 3497 const auto &&SecondGen = [&S, HasCancel, 3498 &HasLastprivates](CodeGenFunction &CGF) { 3499 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3500 CGF, S.getDirectiveKind(), HasCancel); 3501 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3502 emitForLoopBounds, 3503 emitDispatchForLoopBounds); 3504 }; 3505 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3506 } else { 3507 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3508 HasCancel); 3509 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3510 emitForLoopBounds, 3511 emitDispatchForLoopBounds); 3512 } 3513 return HasLastprivates; 3514 } 3515 3516 static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) { 3517 if (S.hasCancel()) 3518 return false; 3519 for (OMPClause *C : S.clauses()) 3520 if (!isa<OMPNowaitClause>(C)) 3521 return false; 3522 3523 return true; 3524 } 3525 3526 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3527 bool HasLastprivates = false; 3528 bool UseOMPIRBuilder = 3529 CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S); 3530 auto &&CodeGen = [this, &S, &HasLastprivates, 3531 UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) { 3532 // Use the OpenMPIRBuilder if enabled. 3533 if (UseOMPIRBuilder) { 3534 // Emit the associated statement and get its loop representation. 3535 const Stmt *Inner = S.getRawStmt(); 3536 llvm::CanonicalLoopInfo *CLI = 3537 EmitOMPCollapsedCanonicalLoopNest(Inner, 1); 3538 3539 bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>(); 3540 llvm::OpenMPIRBuilder &OMPBuilder = 3541 CGM.getOpenMPRuntime().getOMPBuilder(); 3542 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP( 3543 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator()); 3544 OMPBuilder.createWorkshareLoop(Builder, CLI, AllocaIP, NeedsBarrier); 3545 return; 3546 } 3547 3548 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3549 }; 3550 { 3551 auto LPCRegion = 3552 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3553 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3554 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3555 S.hasCancel()); 3556 } 3557 3558 if (!UseOMPIRBuilder) { 3559 // Emit an implicit barrier at the end. 3560 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3561 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3562 } 3563 // Check for outer lastprivate conditional update. 3564 checkForLastprivateConditionalUpdate(*this, S); 3565 } 3566 3567 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3568 bool HasLastprivates = false; 3569 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3570 PrePostActionTy &) { 3571 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3572 }; 3573 { 3574 auto LPCRegion = 3575 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3576 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3577 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3578 } 3579 3580 // Emit an implicit barrier at the end. 3581 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3582 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3583 // Check for outer lastprivate conditional update. 
3584 checkForLastprivateConditionalUpdate(*this, S); 3585 } 3586 3587 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3588 const Twine &Name, 3589 llvm::Value *Init = nullptr) { 3590 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3591 if (Init) 3592 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3593 return LVal; 3594 } 3595 3596 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3597 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3598 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3599 bool HasLastprivates = false; 3600 auto &&CodeGen = [&S, CapturedStmt, CS, 3601 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3602 const ASTContext &C = CGF.getContext(); 3603 QualType KmpInt32Ty = 3604 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3605 // Emit helper vars inits. 3606 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3607 CGF.Builder.getInt32(0)); 3608 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3609 ? CGF.Builder.getInt32(CS->size() - 1) 3610 : CGF.Builder.getInt32(0); 3611 LValue UB = 3612 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3613 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3614 CGF.Builder.getInt32(1)); 3615 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3616 CGF.Builder.getInt32(0)); 3617 // Loop counter. 3618 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3619 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3620 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3621 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3622 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3623 // Generate condition for loop. 3624 BinaryOperator *Cond = BinaryOperator::Create( 3625 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary, 3626 S.getBeginLoc(), FPOptionsOverride()); 3627 // Increment for loop counter. 3628 UnaryOperator *Inc = UnaryOperator::Create( 3629 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 3630 S.getBeginLoc(), true, FPOptionsOverride()); 3631 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3632 // Iterate through all sections and emit a switch construct: 3633 // switch (IV) { 3634 // case 0: 3635 // <SectionStmt[0]>; 3636 // break; 3637 // ... 3638 // case <NumSection> - 1: 3639 // <SectionStmt[<NumSection> - 1]>; 3640 // break; 3641 // } 3642 // .omp.sections.exit: 3643 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3644 llvm::SwitchInst *SwitchStmt = 3645 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3646 ExitBB, CS == nullptr ? 
1 : CS->size()); 3647 if (CS) { 3648 unsigned CaseNumber = 0; 3649 for (const Stmt *SubStmt : CS->children()) { 3650 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3651 CGF.EmitBlock(CaseBB); 3652 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3653 CGF.EmitStmt(SubStmt); 3654 CGF.EmitBranch(ExitBB); 3655 ++CaseNumber; 3656 } 3657 } else { 3658 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3659 CGF.EmitBlock(CaseBB); 3660 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3661 CGF.EmitStmt(CapturedStmt); 3662 CGF.EmitBranch(ExitBB); 3663 } 3664 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3665 }; 3666 3667 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3668 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3669 // Emit implicit barrier to synchronize threads and avoid data races on 3670 // initialization of firstprivate variables and post-update of lastprivate 3671 // variables. 3672 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3673 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3674 /*ForceSimpleCall=*/true); 3675 } 3676 CGF.EmitOMPPrivateClause(S, LoopScope); 3677 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3678 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3679 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3680 (void)LoopScope.Privatize(); 3681 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3682 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3683 3684 // Emit static non-chunked loop. 3685 OpenMPScheduleTy ScheduleKind; 3686 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3687 CGOpenMPRuntime::StaticRTInput StaticInit( 3688 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3689 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3690 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3691 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3692 // UB = min(UB, GlobalUB); 3693 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3694 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3695 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3696 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3697 // IV = LB; 3698 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3699 // while (idx <= UB) { BODY; ++idx; } 3700 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3701 [](CodeGenFunction &) {}); 3702 // Tell the runtime we are done. 3703 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3704 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3705 S.getDirectiveKind()); 3706 }; 3707 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3708 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3709 // Emit post-update of the reduction variables if IsLastIter != 0. 3710 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3711 return CGF.Builder.CreateIsNotNull( 3712 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3713 }); 3714 3715 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
3716 if (HasLastprivates) 3717 CGF.EmitOMPLastprivateClauseFinal( 3718 S, /*NoFinals=*/false, 3719 CGF.Builder.CreateIsNotNull( 3720 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3721 }; 3722 3723 bool HasCancel = false; 3724 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3725 HasCancel = OSD->hasCancel(); 3726 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3727 HasCancel = OPSD->hasCancel(); 3728 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3729 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3730 HasCancel); 3731 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3732 // clause. Otherwise the barrier will be generated by the codegen for the 3733 // directive. 3734 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3735 // Emit implicit barrier to synchronize threads and avoid data races on 3736 // initialization of firstprivate variables. 3737 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3738 OMPD_unknown); 3739 } 3740 } 3741 3742 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3743 { 3744 auto LPCRegion = 3745 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3746 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3747 EmitSections(S); 3748 } 3749 // Emit an implicit barrier at the end. 3750 if (!S.getSingleClause<OMPNowaitClause>()) { 3751 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3752 OMPD_sections); 3753 } 3754 // Check for outer lastprivate conditional update. 3755 checkForLastprivateConditionalUpdate(*this, S); 3756 } 3757 3758 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3759 LexicalScope Scope(*this, S.getSourceRange()); 3760 EmitStopPoint(&S); 3761 EmitStmt(S.getAssociatedStmt()); 3762 } 3763 3764 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3765 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3766 llvm::SmallVector<const Expr *, 8> DestExprs; 3767 llvm::SmallVector<const Expr *, 8> SrcExprs; 3768 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3769 // Check if there are any 'copyprivate' clauses associated with this 3770 // 'single' construct. 
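  // An illustrative use (hypothetical source, not taken from a test):
  //
  //   #pragma omp single copyprivate(a)
  //   a = compute();
  //
  // broadcasts the executing thread's value of 'a' to the other threads in
  // the team via the <destination> = <source> helper expressions gathered
  // below.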
3771 // Build a list of copyprivate variables along with helper expressions 3772 // (<source>, <destination>, <destination>=<source> expressions) 3773 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3774 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3775 DestExprs.append(C->destination_exprs().begin(), 3776 C->destination_exprs().end()); 3777 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3778 AssignmentOps.append(C->assignment_ops().begin(), 3779 C->assignment_ops().end()); 3780 } 3781 // Emit code for 'single' region along with 'copyprivate' clauses 3782 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3783 Action.Enter(CGF); 3784 OMPPrivateScope SingleScope(CGF); 3785 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3786 CGF.EmitOMPPrivateClause(S, SingleScope); 3787 (void)SingleScope.Privatize(); 3788 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3789 }; 3790 { 3791 auto LPCRegion = 3792 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3793 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3794 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3795 CopyprivateVars, DestExprs, 3796 SrcExprs, AssignmentOps); 3797 } 3798 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3799 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3800 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3801 CGM.getOpenMPRuntime().emitBarrierCall( 3802 *this, S.getBeginLoc(), 3803 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3804 } 3805 // Check for outer lastprivate conditional update. 3806 checkForLastprivateConditionalUpdate(*this, S); 3807 } 3808 3809 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3810 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3811 Action.Enter(CGF); 3812 CGF.EmitStmt(S.getRawStmt()); 3813 }; 3814 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3815 } 3816 3817 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3818 if (CGM.getLangOpts().OpenMPIRBuilder) { 3819 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3820 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3821 3822 const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt(); 3823 3824 auto FiniCB = [this](InsertPointTy IP) { 3825 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3826 }; 3827 3828 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3829 InsertPointTy CodeGenIP, 3830 llvm::BasicBlock &FiniBB) { 3831 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3832 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3833 CodeGenIP, FiniBB); 3834 }; 3835 3836 LexicalScope Scope(*this, S.getSourceRange()); 3837 EmitStopPoint(&S); 3838 Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB)); 3839 3840 return; 3841 } 3842 LexicalScope Scope(*this, S.getSourceRange()); 3843 EmitStopPoint(&S); 3844 emitMaster(*this, S); 3845 } 3846 3847 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3848 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3849 Action.Enter(CGF); 3850 CGF.EmitStmt(S.getRawStmt()); 3851 }; 3852 Expr *Filter = nullptr; 3853 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3854 Filter = 
FilterClause->getThreadID(); 3855 CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(), 3856 Filter); 3857 } 3858 3859 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) { 3860 if (CGM.getLangOpts().OpenMPIRBuilder) { 3861 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3862 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3863 3864 const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt(); 3865 const Expr *Filter = nullptr; 3866 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>()) 3867 Filter = FilterClause->getThreadID(); 3868 llvm::Value *FilterVal = Filter 3869 ? EmitScalarExpr(Filter, CGM.Int32Ty) 3870 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0); 3871 3872 auto FiniCB = [this](InsertPointTy IP) { 3873 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3874 }; 3875 3876 auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP, 3877 InsertPointTy CodeGenIP, 3878 llvm::BasicBlock &FiniBB) { 3879 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3880 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt, 3881 CodeGenIP, FiniBB); 3882 }; 3883 3884 LexicalScope Scope(*this, S.getSourceRange()); 3885 EmitStopPoint(&S); 3886 Builder.restoreIP( 3887 OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal)); 3888 3889 return; 3890 } 3891 LexicalScope Scope(*this, S.getSourceRange()); 3892 EmitStopPoint(&S); 3893 emitMasked(*this, S); 3894 } 3895 3896 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 3897 if (CGM.getLangOpts().OpenMPIRBuilder) { 3898 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder(); 3899 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3900 3901 const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt(); 3902 const Expr *Hint = nullptr; 3903 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3904 Hint = HintClause->getHint(); 3905 3906 // TODO: This is slightly different from what's currently being done in 3907 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 3908 // about typing is final. 
3909   llvm::Value *HintInst = nullptr;
3910   if (Hint)
3911     HintInst =
3912         Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);
3913
3914   auto FiniCB = [this](InsertPointTy IP) {
3915     OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
3916   };
3917
3918   auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
3919                                                   InsertPointTy CodeGenIP,
3920                                                   llvm::BasicBlock &FiniBB) {
3921     OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
3922     OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
3923                                            CodeGenIP, FiniBB);
3924   };
3925
3926   LexicalScope Scope(*this, S.getSourceRange());
3927   EmitStopPoint(&S);
3928   Builder.restoreIP(OMPBuilder.createCritical(
3929       Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
3930       HintInst));
3931
3932   return;
3933   }
3934
3935   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3936     Action.Enter(CGF);
3937     CGF.EmitStmt(S.getAssociatedStmt());
3938   };
3939   const Expr *Hint = nullptr;
3940   if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
3941     Hint = HintClause->getHint();
3942   LexicalScope Scope(*this, S.getSourceRange());
3943   EmitStopPoint(&S);
3944   CGM.getOpenMPRuntime().emitCriticalRegion(*this,
3945                                             S.getDirectiveName().getAsString(),
3946                                             CodeGen, S.getBeginLoc(), Hint);
3947 }
3948
3949 void CodeGenFunction::EmitOMPParallelForDirective(
3950     const OMPParallelForDirective &S) {
3951   // Emit directive as a combined directive that consists of two implicit
3952   // directives: 'parallel' and 'for'.
3953   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3954     Action.Enter(CGF);
3955     (void)emitWorksharingDirective(CGF, S, S.hasCancel());
3956   };
3957   {
3958     auto LPCRegion =
3959         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3960     emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
3961                                    emitEmptyBoundParameters);
3962   }
3963   // Check for outer lastprivate conditional update.
3964   checkForLastprivateConditionalUpdate(*this, S);
3965 }
3966
3967 void CodeGenFunction::EmitOMPParallelForSimdDirective(
3968     const OMPParallelForSimdDirective &S) {
3969   // Emit directive as a combined directive that consists of two implicit
3970   // directives: 'parallel' and 'for simd'.
3971   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3972     Action.Enter(CGF);
3973     (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
3974   };
3975   {
3976     auto LPCRegion =
3977         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3978     emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
3979                                    emitEmptyBoundParameters);
3980   }
3981   // Check for outer lastprivate conditional update.
3982   checkForLastprivateConditionalUpdate(*this, S);
3983 }
3984
3985 void CodeGenFunction::EmitOMPParallelMasterDirective(
3986     const OMPParallelMasterDirective &S) {
3987   // Emit directive as a combined directive that consists of two implicit
3988   // directives: 'parallel' and 'master'.
3989   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3990     Action.Enter(CGF);
3991     OMPPrivateScope PrivateScope(CGF);
3992     bool Copyins = CGF.EmitOMPCopyinClause(S);
3993     (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3994     if (Copyins) {
3995       // Emit implicit barrier to synchronize threads and avoid data races
3996       // when propagating the master thread's values of threadprivate
3997       // variables to the local instances in all other implicit threads.
3998       CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3999           CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
4000           /*ForceSimpleCall=*/true);
4001     }
4002     CGF.EmitOMPPrivateClause(S, PrivateScope);
4003     CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4004     (void)PrivateScope.Privatize();
4005     emitMaster(CGF, S);
4006     CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4007   };
4008   {
4009     auto LPCRegion =
4010         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4011     emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
4012                                    emitEmptyBoundParameters);
4013     emitPostUpdateForReductionClause(*this, S,
4014                                      [](CodeGenFunction &) { return nullptr; });
4015   }
4016   // Check for outer lastprivate conditional update.
4017   checkForLastprivateConditionalUpdate(*this, S);
4018 }
4019
4020 void CodeGenFunction::EmitOMPParallelSectionsDirective(
4021     const OMPParallelSectionsDirective &S) {
4022   // Emit directive as a combined directive that consists of two implicit
4023   // directives: 'parallel' and 'sections'.
4024   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4025     Action.Enter(CGF);
4026     CGF.EmitSections(S);
4027   };
4028   {
4029     auto LPCRegion =
4030         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4031     emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
4032                                    emitEmptyBoundParameters);
4033   }
4034   // Check for outer lastprivate conditional update.
4035   checkForLastprivateConditionalUpdate(*this, S);
4036 }
4037
4038 namespace {
4039 /// Get the list of variables declared in the context of the untied tasks.
4040 class CheckVarsEscapingUntiedTaskDeclContext final
4041     : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
4042   llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
4043
4044 public:
4045   explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
4046   virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
4047   void VisitDeclStmt(const DeclStmt *S) {
4048     if (!S)
4049       return;
4050     // Need to privatize only local vars; static locals can be processed as is.
4051     for (const Decl *D : S->decls()) {
4052       if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
4053         if (VD->hasLocalStorage())
4054           PrivateDecls.push_back(VD);
4055     }
4056   }
4057   void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
4058   void VisitCapturedStmt(const CapturedStmt *) { return; }
4059   void VisitLambdaExpr(const LambdaExpr *) { return; }
4060   void VisitBlockExpr(const BlockExpr *) { return; }
4061   void VisitStmt(const Stmt *S) {
4062     if (!S)
4063       return;
4064     for (const Stmt *Child : S->children())
4065       if (Child)
4066         Visit(Child);
4067   }
4068
4069   /// Returns the list of private local variables collected so far.
4070   ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
4071 };
4072 } // anonymous namespace
4073
4074 void CodeGenFunction::EmitOMPTaskBasedDirective(
4075     const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
4076     const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
4077     OMPTaskDataTy &Data) {
4078   // Emit outlined function for task construct.
4079 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion); 4080 auto I = CS->getCapturedDecl()->param_begin(); 4081 auto PartId = std::next(I); 4082 auto TaskT = std::next(I, 4); 4083 // Check if the task is final 4084 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 4085 // If the condition constant folds and can be elided, try to avoid emitting 4086 // the condition and the dead arm of the if/else. 4087 const Expr *Cond = Clause->getCondition(); 4088 bool CondConstant; 4089 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 4090 Data.Final.setInt(CondConstant); 4091 else 4092 Data.Final.setPointer(EvaluateExprAsBool(Cond)); 4093 } else { 4094 // By default the task is not final. 4095 Data.Final.setInt(/*IntVal=*/false); 4096 } 4097 // Check if the task has 'priority' clause. 4098 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 4099 const Expr *Prio = Clause->getPriority(); 4100 Data.Priority.setInt(/*IntVal=*/true); 4101 Data.Priority.setPointer(EmitScalarConversion( 4102 EmitScalarExpr(Prio), Prio->getType(), 4103 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 4104 Prio->getExprLoc())); 4105 } 4106 // The first function argument for tasks is a thread id, the second one is a 4107 // part id (0 for tied tasks, >=0 for untied task). 4108 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 4109 // Get list of private variables. 4110 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 4111 auto IRef = C->varlist_begin(); 4112 for (const Expr *IInit : C->private_copies()) { 4113 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4114 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4115 Data.PrivateVars.push_back(*IRef); 4116 Data.PrivateCopies.push_back(IInit); 4117 } 4118 ++IRef; 4119 } 4120 } 4121 EmittedAsPrivate.clear(); 4122 // Get list of firstprivate variables. 4123 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4124 auto IRef = C->varlist_begin(); 4125 auto IElemInitRef = C->inits().begin(); 4126 for (const Expr *IInit : C->private_copies()) { 4127 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4128 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4129 Data.FirstprivateVars.push_back(*IRef); 4130 Data.FirstprivateCopies.push_back(IInit); 4131 Data.FirstprivateInits.push_back(*IElemInitRef); 4132 } 4133 ++IRef; 4134 ++IElemInitRef; 4135 } 4136 } 4137 // Get list of lastprivate variables (for taskloops). 
4138 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 4139 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 4140 auto IRef = C->varlist_begin(); 4141 auto ID = C->destination_exprs().begin(); 4142 for (const Expr *IInit : C->private_copies()) { 4143 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 4144 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 4145 Data.LastprivateVars.push_back(*IRef); 4146 Data.LastprivateCopies.push_back(IInit); 4147 } 4148 LastprivateDstsOrigs.insert( 4149 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 4150 cast<DeclRefExpr>(*IRef)}); 4151 ++IRef; 4152 ++ID; 4153 } 4154 } 4155 SmallVector<const Expr *, 4> LHSs; 4156 SmallVector<const Expr *, 4> RHSs; 4157 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 4158 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4159 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4160 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4161 Data.ReductionOps.append(C->reduction_ops().begin(), 4162 C->reduction_ops().end()); 4163 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4164 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4165 } 4166 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 4167 *this, S.getBeginLoc(), LHSs, RHSs, Data); 4168 // Build list of dependences. 4169 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4170 OMPTaskDataTy::DependData &DD = 4171 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4172 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4173 } 4174 // Get list of local vars for untied tasks. 4175 if (!Data.Tied) { 4176 CheckVarsEscapingUntiedTaskDeclContext Checker; 4177 Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt()); 4178 Data.PrivateLocals.append(Checker.getPrivateDecls().begin(), 4179 Checker.getPrivateDecls().end()); 4180 } 4181 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 4182 CapturedRegion](CodeGenFunction &CGF, 4183 PrePostActionTy &Action) { 4184 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> 4185 UntiedLocalVars; 4186 // Set proper addresses for generated private copies. 4187 OMPPrivateScope Scope(CGF); 4188 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 4189 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 4190 !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) { 4191 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4192 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4193 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4194 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4195 CS->getCapturedDecl()->getParam(PrivatesParam))); 4196 // Map privates. 
4197 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4198 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4199 llvm::SmallVector<llvm::Type *, 4> ParamTypes; 4200 CallArgs.push_back(PrivatesPtr); 4201 ParamTypes.push_back(PrivatesPtr->getType()); 4202 for (const Expr *E : Data.PrivateVars) { 4203 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4204 Address PrivatePtr = CGF.CreateMemTemp( 4205 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 4206 PrivatePtrs.emplace_back(VD, PrivatePtr); 4207 CallArgs.push_back(PrivatePtr.getPointer()); 4208 ParamTypes.push_back(PrivatePtr.getType()); 4209 } 4210 for (const Expr *E : Data.FirstprivateVars) { 4211 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4212 Address PrivatePtr = 4213 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4214 ".firstpriv.ptr.addr"); 4215 PrivatePtrs.emplace_back(VD, PrivatePtr); 4216 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 4217 CallArgs.push_back(PrivatePtr.getPointer()); 4218 ParamTypes.push_back(PrivatePtr.getType()); 4219 } 4220 for (const Expr *E : Data.LastprivateVars) { 4221 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4222 Address PrivatePtr = 4223 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4224 ".lastpriv.ptr.addr"); 4225 PrivatePtrs.emplace_back(VD, PrivatePtr); 4226 CallArgs.push_back(PrivatePtr.getPointer()); 4227 ParamTypes.push_back(PrivatePtr.getType()); 4228 } 4229 for (const VarDecl *VD : Data.PrivateLocals) { 4230 QualType Ty = VD->getType().getNonReferenceType(); 4231 if (VD->getType()->isLValueReferenceType()) 4232 Ty = CGF.getContext().getPointerType(Ty); 4233 if (isAllocatableDecl(VD)) 4234 Ty = CGF.getContext().getPointerType(Ty); 4235 Address PrivatePtr = CGF.CreateMemTemp( 4236 CGF.getContext().getPointerType(Ty), ".local.ptr.addr"); 4237 UntiedLocalVars.try_emplace(VD, PrivatePtr, Address::invalid()); 4238 CallArgs.push_back(PrivatePtr.getPointer()); 4239 ParamTypes.push_back(PrivatePtr.getType()); 4240 } 4241 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(), 4242 ParamTypes, /*isVarArg=*/false); 4243 CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4244 CopyFn, CopyFnTy->getPointerTo()); 4245 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4246 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4247 for (const auto &Pair : LastprivateDstsOrigs) { 4248 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 4249 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 4250 /*RefersToEnclosingVariableOrCapture=*/ 4251 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 4252 Pair.second->getType(), VK_LValue, 4253 Pair.second->getExprLoc()); 4254 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 4255 return CGF.EmitLValue(&DRE).getAddress(CGF); 4256 }); 4257 } 4258 for (const auto &Pair : PrivatePtrs) { 4259 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4260 CGF.getContext().getDeclAlign(Pair.first)); 4261 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4262 } 4263 // Adjust mapping for internal locals by mapping actual memory instead of 4264 // a pointer to this memory. 
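      // Roughly: each entry holds a 'T **' temp filled in by the copy
      // function; a single load produces the local's storage for plain
      // locals, while locals allocated via an allocator need a second load
      // to get from the pointer cell to the underlying memory (see the two
      // CreateLoad calls below).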
      for (auto &Pair : UntiedLocalVars) {
        if (isAllocatableDecl(Pair.first)) {
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(Ptr, CGF.getPointerAlign());
          Pair.getSecond().first = Replacement;
          Ptr = CGF.Builder.CreateLoad(Replacement);
          Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
          Pair.getSecond().second = Replacement;
        } else {
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
          Pair.getSecond().first = Replacement;
        }
      }
    }
    if (Data.Reductions) {
      OMPPrivateScope FirstprivateScope(CGF);
      for (const auto &Pair : FirstprivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        FirstprivateScope.addPrivate(Pair.first,
                                     [Replacement]() { return Replacement; });
      }
      (void)FirstprivateScope.Privatize();
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
                             Data.ReductionCopies, Data.ReductionOps);
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement =
            Address(CGF.EmitScalarConversion(
                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                        CGF.getContext().getPointerType(
                            Data.ReductionCopies[Cnt]->getType()),
                        Data.ReductionCopies[Cnt]->getExprLoc()),
                    Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt),
                         [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
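    // E.g., for
    //   #pragma omp taskgroup task_reduction(+ : x)
    //   ...
    //   #pragma omp task in_reduction(+ : x)
    // each task obtains its thread-specific copy of 'x' from the enclosing
    // taskgroup's reduction descriptor via getTaskReductionItem below.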
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicitly firstprivate
        // and is privatized already during processing of the firstprivates.
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        llvm::Value *ReductionsPtr;
        if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
          ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
                                               TRExpr->getExprLoc());
        } else {
          ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
        }
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
                              [Replacement]() { return Replacement; });
      }
    }
    (void)InRedScope.Privatize();

    CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
                                                             UntiedLocalVars);
    Action.Enter(CGF);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S, llvm::None,
                        !isOpenMPParallelDirective(S.getDirectiveKind()) &&
                            !isOpenMPSimdDirective(S.getDirectiveKind()));
  TaskGen(*this, OutlinedFn, Data);
}

static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
                                  QualType Ty, CapturedDecl *CD,
                                  SourceLocation Loc) {
  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                           ImplicitParamDecl::Other);
  auto *OrigRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                              ImplicitParamDecl::Other);
  auto *PrivateRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  QualType ElemType = C.getBaseElementType(Ty);
  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
                                           ImplicitParamDecl::Other);
  auto *InitRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
  PrivateVD->setInitStyle(VarDecl::CInit);
  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
                                              InitRef, /*BasePath=*/nullptr,
                                              VK_RValue, FPOptionsOverride()));
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivateRef);
  Data.FirstprivateInits.emplace_back(InitRef);
  return OrigVD;
}

void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
    OMPTargetDataInfo &InputInfo) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  OMPTaskDataTy Data;
  // The task is not final.
  Data.Final.setInt(/*IntVal=*/false);
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
      ++IRef;
      ++IElemInitRef;
    }
  }
  OMPPrivateScope TargetScope(*this);
  VarDecl *BPVD = nullptr;
  VarDecl *PVD = nullptr;
  VarDecl *SVD = nullptr;
  VarDecl *MVD = nullptr;
  if (InputInfo.NumberOfTargetItems > 0) {
    auto *CD = CapturedDecl::Create(
        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
    QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
        getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    BPVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    PVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    QualType SizesType = getContext().getConstantArrayType(
        getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
        ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
                                            S.getBeginLoc());
    TargetScope.addPrivate(
        BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
    TargetScope.addPrivate(PVD,
                           [&InputInfo]() { return InputInfo.PointersArray; });
    TargetScope.addPrivate(SVD,
                           [&InputInfo]() { return InputInfo.SizesArray; });
    // If there is no user-defined mapper, the mapper array will be nullptr. In
    // this case, we don't need to privatize it.
    if (!dyn_cast_or_null<llvm::ConstantPointerNull>(
            InputInfo.MappersArray.getPointer())) {
      MVD = createImplicitFirstprivateForType(
          getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
      TargetScope.addPrivate(MVD,
                             [&InputInfo]() { return InputInfo.MappersArray; });
    }
  }
  (void)TargetScope.Privatize();
  // Build list of dependences.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  }
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
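    // The base-pointer/pointer/size (and optional mapper) arrays prepared for
    // the map clauses were captured above as implicit firstprivates (BPVD,
    // PVD, SVD, MVD), so the deferred target call executed from the task body
    // sees the same buffers as the point where the task was created.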
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      llvm::SmallVector<llvm::Type *, 4> ParamTypes;
      CallArgs.push_back(PrivatesPtr);
      ParamTypes.push_back(PrivatesPtr->getType());
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
        ParamTypes.push_back(PrivatePtr.getType());
      }
      auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
                                               ParamTypes, /*isVarArg=*/false);
      CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CopyFn, CopyFnTy->getPointerTo());
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(CGF.Builder.CreateLoad(Pair.second),
                            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    if (InputInfo.NumberOfTargetItems > 0) {
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
      // If MVD is nullptr, the mapper array is not privatized.
      if (MVD)
        InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
            CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
    }

    Action.Enter(CGF);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
      Data.NumberOfParts);
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
  IntegerLiteral IfCond(getContext(), TrueOrFalse,
                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
                        SourceLocation());

  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
                                      SharedsTy, CapturedStruct, &IfCond, Data);
}

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
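  // A task directive lowers, roughly (sketch only; the final entry points are
  // chosen by CGOpenMPRuntime::emitTaskCall, and libomp's actual signatures
  // carry a few more size arguments), to:
  //   task = __kmpc_omp_task_alloc(loc, gtid, flags, sizes, &task_entry);
  //   __kmpc_omp_task(loc, gtid, task);  // or ..._with_deps for depend()
  // with the associated statement emitted into the outlined task entry.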
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if we should emit tied or untied task.
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
                                            Data);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    if (const Expr *E = S.getReductionRef()) {
      SmallVector<const Expr *, 4> LHSs;
      SmallVector<const Expr *, 4> RHSs;
      OMPTaskDataTy Data;
      for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
        Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
        Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
        Data.ReductionOps.append(C->reduction_ops().begin(),
                                 C->reduction_ops().end());
        LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
        RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
      }
      llvm::Value *ReductionDesc =
          CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
                                                           LHSs, RHSs, Data);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      CGF.EmitVarDecl(*VD);
      CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
                            /*Volatile=*/false, E->getType());
    }
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
                                ? llvm::AtomicOrdering::NotAtomic
                                : llvm::AtomicOrdering::AcquireRelease;
  CGM.getOpenMPRuntime().emitFlush(
      *this,
      [&S]() -> ArrayRef<const Expr *> {
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
          return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                    FlushClause->varlist_end());
        return llvm::None;
      }(),
      S.getBeginLoc(), AO);
}

void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
  const auto *DO = S.getSingleClause<OMPDepobjClause>();
  LValue DOLVal = EmitLValue(DO->getDepobj());
  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
    OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
                                           DC->getModifier());
    Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
    Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
        *this, Dependencies, DC->getBeginLoc());
    EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
    return;
  }
  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
    CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
    return;
  }
  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
    CGM.getOpenMPRuntime().emitUpdateClause(
        *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
    return;
  }
}

void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
  if (!OMPParentLoopDirectiveForScan)
    return;
  const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
  bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
    if (C->getModifier() != OMPC_REDUCTION_inscan)
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  if (ParentDir.getDirectiveKind() == OMPD_simd ||
      (getLangOpts().OpenMPSimd &&
       isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
    // For simd directive and simd-based directives in simd only mode, use the
    // following codegen:
    // int x = 0;
    // #pragma omp simd reduction(inscan, +: x)
    // for (..) {
    //   <first part>
    //   #pragma omp scan inclusive(x)
    //   <second part>
    // }
    // is transformed to:
    // int x = 0;
    // for (..) {
    //   int x_priv = 0;
    //   <first part>
    //   x = x_priv + x;
    //   x_priv = x;
    //   <second part>
    // }
    // and
    // int x = 0;
    // #pragma omp simd reduction(inscan, +: x)
    // for (..) {
    //   <first part>
    //   #pragma omp scan exclusive(x)
    //   <second part>
    // }
    // to
    // int x = 0;
    // for (..) {
    //   int x_priv = 0;
    //   <second part>
    //   int temp = x;
    //   x = x_priv + x;
    //   x_priv = temp;
    //   <first part>
    // }
    llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
    EmitBranch(IsInclusive
                   ? OMPScanReduce
                   : BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanDispatch);
    {
      // New scope for correct construction/destruction of temp variables for
      // exclusive scan.
      LexicalScope Scope(*this, S.getSourceRange());
      EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
      EmitBlock(OMPScanReduce);
      if (!IsInclusive) {
        // Create temp var and copy LHS value to this temp value.
        // TMP = LHS;
        for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
          const Expr *PrivateExpr = Privates[I];
          const Expr *TempExpr = CopyArrayTemps[I];
          EmitAutoVarDecl(
              *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
          LValue DestLVal = EmitLValue(TempExpr);
          LValue SrcLVal = EmitLValue(LHSs[I]);
          EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                      SrcLVal.getAddress(*this),
                      cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                      cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                      CopyOps[I]);
        }
      }
      CGM.getOpenMPRuntime().emitReduction(
          *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
          {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        LValue DestLVal;
        LValue SrcLVal;
        if (IsInclusive) {
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(LHSs[I]);
        } else {
          const Expr *TempExpr = CopyArrayTemps[I];
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(TempExpr);
        }
        EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                    SrcLVal.getAddress(*this),
                    cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                    cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                    CopyOps[I]);
      }
    }
    EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
    OMPScanExitBlock = IsInclusive
                           ? BreakContinueStack.back().ContinueBlock.getBlock()
                           : OMPScanReduce;
    EmitBlock(OMPAfterScanBlock);
    return;
  }
  if (!IsInclusive) {
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanExitBlock);
  }
  if (OMPFirstScanLoop) {
    // Emit buffer[i] = red; at the end of the input phase.
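    // For the non-simd case the enclosing loop directive's codegen runs the
    // loop body twice (an input phase, then a scan phase) and allocates one
    // buffer element per iteration for every inscan reduction item; the input
    // phase saves this iteration's running value into buffer[i] here.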
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue DestLVal = EmitLValue(CopyArrayElem);
      LValue SrcLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
  }
  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  if (IsInclusive) {
    EmitBlock(OMPScanExitBlock);
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  }
  EmitBlock(OMPScanDispatch);
  if (!OMPFirstScanLoop) {
    // Emit red = buffer[i]; at the entrance to the scan phase.
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    llvm::BasicBlock *ExclusiveExitBB = nullptr;
    if (!IsInclusive) {
      llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
      ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
      llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
      Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
      EmitBlock(ContBB);
      // Use idx - 1 iteration for exclusive scan.
      IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
    }
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue SrcLVal = EmitLValue(CopyArrayElem);
      LValue DestLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
    if (!IsInclusive) {
      EmitBlock(ExclusiveExitBB);
    }
  }
  EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
                                               : OMPAfterScanBlock);
  EmitBlock(OMPAfterScanBlock);
}

void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
                                            const CodeGenLoopTy &CodeGenLoop,
                                            Expr *IncExpr) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iteration count on
  // each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause = false;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.

      LValue LB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedLowerBoundVariable()
                          : S.getLowerBoundVariable())));
      LValue UB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedUpperBoundVariable()
                          : S.getUpperBoundVariable())));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind()))
        EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const Expr *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getBeginLoc());
        }
      } else {
        // Default behaviour for dist_schedule clause.
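        // With no dist_schedule clause the default is equivalent to
        // dist_schedule(static) with no chunk, i.e. each team gets at most
        // one contiguous chunk of roughly equal size; targets may override
        // this via getDefaultDistScheduleAndChunk.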
        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
            *this, S, ScheduleKind, Chunk);
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      bool StaticChunked = RT.isStaticChunked(
          ScheduleKind, /* Chunked */ Chunk != nullptr) &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) ||
          StaticChunked) {
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            StaticChunked ? Chunk : nullptr);
        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                    StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedInit()
                            : S.getInit());

        const Expr *Cond =
            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                ? S.getCombinedCond()
                : S.getCond();

        if (StaticChunked)
          Cond = S.getCombinedDistCond();

        // For static unchunked schedules generate:
        //
        // 1. For distribute alone, codegen
        //    while (idx <= UB) {
        //      BODY;
        //      ++idx;
        //    }
        //
        // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
        //    while (idx <= UB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      idx += ST;
        //    }
        //
        // For static chunked schedules generate:
        //
        // while (IV <= GlobalUB) {
        //   <CodeGen rest of pragma>(LB, UB);
        //   LB += ST;
        //   UB += ST;
        //   UB = min(UB, GlobalUB);
        //   IV = LB;
        // }
        //
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind()))
                CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
            },
            [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
             StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(), Cond, IncExpr,
                  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
                    CodeGenLoop(CGF, S, LoopExit);
                  },
                  [&S, StaticChunked](CodeGenFunction &CGF) {
                    if (StaticChunked) {
                      CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                      CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedInit());
                    }
                  });
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
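        // For the static schedules above this is a single runtime call
        // releasing the static loop descriptor (with libomp typically
        // __kmpc_for_static_fini; the exact entry point is up to the
        // CGOpenMPRuntime implementation).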
        RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments = {
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk};
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                   CodeGenLoop);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
        EmitOMPReductionClauseFinal(S, OMPD_simd);
        // Emit post-update of the reduction variables if IsLastIter != 0.
        emitPostUpdateForReductionClause(
            *this, S, [IL, &S](CodeGenFunction &CGF) {
              return CGF.Builder.CreateIsNotNull(
                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
            });
      }
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause) {
        EmitOMPLastprivateClauseFinal(
            S, /*NoFinals=*/false,
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      }
    }

    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
}

void CodeGenFunction::EmitOMPDistributeDirective(
    const OMPDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S,
                                                   SourceLocation Loc) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  CGF.CapturedStmtInfo = &CapStmtInfo;
  llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
  Fn->setDoesNotRecurse();
  return Fn;
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  if (S.hasClausesOfKind<OMPDependClause>()) {
    assert(!S.hasAssociatedStmt() &&
           "No associated statement must be in ordered depend construct.");
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
    return;
  }
  const auto *C = S.getSingleClause<OMPSIMDClause>();
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
                                 PrePostActionTy &Action) {
    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    if (C) {
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      llvm::Function *OutlinedFn =
          emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
                                                      OutlinedFn, CapturedVars);
    } else {
      Action.Enter(CGF);
      CGF.EmitStmt(CS->getCapturedStmt());
    }
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(),
                                           !C);
}

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                                   DestType, Loc)
                        : CGF.EmitComplexToScalarConversion(
                              Val.getComplexVal(), SrcType, DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    llvm::Value *ScalarVal = CGF.EmitScalarConversion(
        Val.getScalarVal(), SrcType, DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF,
                                  llvm::AtomicOrdering AO, LValue LVal,
                                  RValue RVal) {
  if (LVal.isGlobalReg())
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  else
    CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
}

static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, LValue LVal,
                                   SourceLocation Loc) {
  if (LVal.isGlobalReg())
    return CGF.EmitLoadOfLValue(LVal, Loc);
  return CGF.EmitAtomicLoad(
      LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
      LVal.isVolatile());
}

void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
                                         QualType RValTy, SourceLocation Loc) {
  switch (getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                               *this, RVal, RValTy, LVal.getType(), Loc)),
                           LVal);
    break;
  case TEK_Complex:
    EmitStoreOfComplex(
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void emitOMPAtomicReadExpr(CodeGenFunction &CGF,
                                  llvm::AtomicOrdering AO, const Expr *X,
                                  const Expr *V, SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
  // OpenMP, 2.17.7, atomic Construct
  // If the read or capture clause is specified and the acquire, acq_rel, or
  // seq_cst clause is specified then the strong flush on exit from the atomic
  // operation is also an acquire flush.
  switch (AO) {
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Acquire);
    break;
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::Release:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
}

static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, const Expr *X,
                                   const Expr *E, SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  ASTContext &Context = CGF.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
  // expression is simple and atomic is allowed for the given type for the
  // target platform.
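  // E.g., with 'int x;' an update such as
  //   #pragma omp atomic
  //   x += val;
  // can then be emitted as a single instruction (sketch, typed-pointer IR):
  //   %old = atomicrmw add i32* %x, i32 %val monotonic
  // Anything that fails the checks below falls back to the caller's
  // compare-and-exchange path.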
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
      (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
       (Update.getScalarVal()->getType() !=
        X.getAddress(CGF).getElementType())) ||
      !X.getAddress(CGF).getElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  llvm::Value *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress(CGF).getElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  llvm::Value *Res =
      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> CommonGen) {
  // Update expressions are allowed to have the following forms:
  //  x binop= expr; -> xrval + expr;
  //  x++, ++x -> xrval + 1;
  //  x--, --x -> xrval - 1;
  //  x = x binop expr; -> xrval binop expr
  //  x = expr Op x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
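      // Roughly (sketch of what EmitAtomicUpdate produces):
      //   expected = atomic load x
      //   do {
      //     desired = CommonGen(expected); // recompute with the latest value
      //   } while (!atomic_compare_exchange(x, &expected, desired));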
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
                                    llvm::AtomicOrdering AO, const Expr *X,
                                    const Expr *E, const Expr *UE,
                                    bool IsXLHSInRHSPart, SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  //  x binop= expr; -> xrval + expr;
  //  x++, ++x -> xrval + 1;
  //  x--, --x -> xrval - 1;
  //  x = x binop expr; -> xrval binop expr
  //  x = expr Op x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType,
                            SourceLocation Loc) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(
        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
                                     llvm::AtomicOrdering AO,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    //  x binop= expr; -> xrval + expr;
    //  x++, ++x -> xrval + 1;
    //  x--, --x -> xrval - 1;
    //  x = x binop expr; -> xrval binop expr
    //  x = expr Op x; -> expr binop xrval;
    const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsPostfixUpdate](RValue XRValue) {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide new value, so evaluate it using old
        // value of 'x'.
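        // E.g., for 'v = ++x;' lowered as 'atomicrmw add': the instruction
        // returns the value 'x' had before the update, so the captured new
        // value must be recomputed here from that old value.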
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  // If the read or capture clause is specified and the acquire, acq_rel, or
  // seq_cst clause is specified then the strong flush on exit from the atomic
  // operation is also an acquire flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Acquire);
    break;
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::AcquireRelease);
    break;
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              llvm::AtomicOrdering AO, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
    break;
  case OMPC_write:
    emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_task_reduction:
  case OMPC_in_reduction:
  case OMPC_safelen:
  case OMPC_simdlen:
  case OMPC_sizes:
  case OMPC_allocator:
  case OMPC_allocate:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_acq_rel:
  case OMPC_acquire:
  case OMPC_release:
  case OMPC_relaxed:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_depobj:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
  case OMPC_threads:
  case OMPC_simd:
  case OMPC_map:
  case OMPC_num_teams:
  case OMPC_thread_limit:
  case OMPC_priority:
  case OMPC_grainsize:
  case OMPC_nogroup:
  case OMPC_num_tasks:
  case OMPC_hint:
  case OMPC_dist_schedule:
  case OMPC_defaultmap:
  case OMPC_uniform:
  case OMPC_to:
  case OMPC_from:
  case OMPC_use_device_ptr:
  case OMPC_use_device_addr:
  case OMPC_is_device_ptr:
  case OMPC_unified_address:
  case OMPC_unified_shared_memory:
  case OMPC_reverse_offload:
  case OMPC_dynamic_allocators:
  case OMPC_atomic_default_mem_order:
  case OMPC_device_type:
  case OMPC_match:
  case OMPC_nontemporal:
  case OMPC_order:
  case OMPC_destroy:
  case OMPC_detach:
  case OMPC_inclusive:
  case OMPC_exclusive:
  case OMPC_uses_allocators:
  case OMPC_affinity:
  case OMPC_init:
  case OMPC_inbranch:
  case OMPC_notinbranch:
  case OMPC_link:
  case OMPC_use:
  case OMPC_novariants:
  case OMPC_nocontext:
  case OMPC_filter:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
  bool MemOrderingSpecified = false;
  if (S.getSingleClause<OMPSeqCstClause>()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcqRelClause>()) {
    AO = llvm::AtomicOrdering::AcquireRelease;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcquireClause>()) {
    AO = llvm::AtomicOrdering::Acquire;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPReleaseClause>()) {
    AO = llvm::AtomicOrdering::Release;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPRelaxedClause>()) {
    AO = llvm::AtomicOrdering::Monotonic;
    MemOrderingSpecified = true;
  }
  OpenMPClauseKind Kind = OMPC_unknown;
  for (const OMPClause *C : S.clauses()) {
    // Find the first clause (skip seq_cst|acq_rel|acquire|release|relaxed
    // clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst &&
        C->getClauseKind() != OMPC_acq_rel &&
        C->getClauseKind() != OMPC_acquire &&
        C->getClauseKind() != OMPC_release &&
        C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) {
      Kind = C->getClauseKind();
      break;
    }
  }
  if (!MemOrderingSpecified) {
    llvm::AtomicOrdering DefaultOrder =
        CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
    if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
        DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
        (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
         Kind == OMPC_capture)) {
      AO = DefaultOrder;
    } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
      if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
        AO = llvm::AtomicOrdering::Release;
      } else if (Kind == OMPC_read) {
        AO = llvm::AtomicOrdering::Acquire;
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(S.getAssociatedStmt());
  emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
                    S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
                    S.getBeginLoc());
}

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen) {
  assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
  CodeGenModule &CGM = CGF.CGM;

  // On device, emit this construct as inlined code.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    OMPLexicalScope Scope(CGF, S, OMPD_target);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
        });
    return;
  }

  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  const Expr *IfCond = nullptr;
  // Check for at most one 'if' clause associated with the target region.
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_target) {
      IfCond = C->getCondition();
      break;
    }
  }

  // Check if we have any device clause associated with the directive.
  llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
      nullptr, OMPC_DEVICE_unknown);
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device.setPointerAndInt(C->getDevice(), C->getModifier());

  // Check if we have an 'if' clause whose conditional always evaluates to
  // false, or if we do not have any targets specified. If so, the target
  // region is not an offload entry point.
  bool IsOffloadEntry = true;
  if (IfCond) {
    bool Val;
    if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
      IsOffloadEntry = false;
  }
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    IsOffloadEntry = false;

  assert(CGF.CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  // In case we have Ctors/Dtors, we use the complete type variant to produce
  // the mangling of the device outlined kernel.
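  // For instance, under the Itanium C++ ABI a constructor has distinct
  // complete-object ("C1") and base-object ("C2") manglings; always taking
  // the complete variant gives the outlined kernel a single deterministic
  // parent name.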
  if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
  else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
  else
    ParentName =
        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));

  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
                                                    IsOffloadEntry, CodeGen);
  OMPLexicalScope Scope(CGF, S, OMPD_task);
  auto &&SizeEmitter =
      [IsOffloadEntry](CodeGenFunction &CGF,
                       const OMPLoopDirective &D) -> llvm::Value * {
    if (IsOffloadEntry) {
      // Name the scope so it stays alive while the iteration count is
      // emitted; an unnamed temporary would be destroyed at the end of its
      // own statement.
      OMPLoopScope LoopScope(CGF, D);
      // Emit calculation of the iterations count.
      llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
      NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
                                                /*isSigned=*/false);
      return NumIterations;
    }
    return nullptr;
  };
  CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
                                        SizeEmitter);
}

static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
                             PrePostActionTy &Action) {
  Action.Enter(CGF);
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
  CGF.EmitOMPPrivateClause(S, PrivateScope);
  (void)PrivateScope.Privatize();
  if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
    CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

  CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
  CGF.EnsureInsertPoint();
}

void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
                                                  StringRef ParentName,
                                                  const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        OpenMPDirectiveKind InnermostKind,
                                        const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);

  const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
  const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
  if (NT || TL) {
    const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
    const Expr *ThreadLimit = TL ?
TL->getThreadLimit() : nullptr; 5859 5860 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 5861 S.getBeginLoc()); 5862 } 5863 5864 OMPTeamsScope Scope(CGF, S); 5865 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5866 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5867 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 5868 CapturedVars); 5869 } 5870 5871 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 5872 // Emit teams region as a standalone region. 5873 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5874 Action.Enter(CGF); 5875 OMPPrivateScope PrivateScope(CGF); 5876 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5877 CGF.EmitOMPPrivateClause(S, PrivateScope); 5878 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5879 (void)PrivateScope.Privatize(); 5880 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 5881 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5882 }; 5883 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5884 emitPostUpdateForReductionClause(*this, S, 5885 [](CodeGenFunction &) { return nullptr; }); 5886 } 5887 5888 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5889 const OMPTargetTeamsDirective &S) { 5890 auto *CS = S.getCapturedStmt(OMPD_teams); 5891 Action.Enter(CGF); 5892 // Emit teams region as a standalone region. 5893 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5894 Action.Enter(CGF); 5895 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5896 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5897 CGF.EmitOMPPrivateClause(S, PrivateScope); 5898 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5899 (void)PrivateScope.Privatize(); 5900 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5901 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5902 CGF.EmitStmt(CS->getCapturedStmt()); 5903 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5904 }; 5905 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 5906 emitPostUpdateForReductionClause(CGF, S, 5907 [](CodeGenFunction &) { return nullptr; }); 5908 } 5909 5910 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 5911 CodeGenModule &CGM, StringRef ParentName, 5912 const OMPTargetTeamsDirective &S) { 5913 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5914 emitTargetTeamsRegion(CGF, Action, S); 5915 }; 5916 llvm::Function *Fn; 5917 llvm::Constant *Addr; 5918 // Emit target region as a standalone region. 5919 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5920 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5921 assert(Fn && Addr && "Target device function emission failed."); 5922 } 5923 5924 void CodeGenFunction::EmitOMPTargetTeamsDirective( 5925 const OMPTargetTeamsDirective &S) { 5926 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5927 emitTargetTeamsRegion(CGF, Action, S); 5928 }; 5929 emitCommonOMPTargetDirective(*this, S, CodeGen); 5930 } 5931 5932 static void 5933 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5934 const OMPTargetTeamsDistributeDirective &S) { 5935 Action.Enter(CGF); 5936 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5937 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5938 }; 5939 5940 // Emit teams region as a standalone region. 
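  // Sketch of a hypothetical input this handles:
  //   #pragma omp target teams distribute reduction(+ : sum)
  //   for (int i = 0; i < n; ++i) sum += a[i];
  // The lambda below privatizes 'sum' for the teams reduction and emits the
  // distribute loop through CodeGenDistribute.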
5941 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5942 PrePostActionTy &Action) { 5943 Action.Enter(CGF); 5944 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5945 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5946 (void)PrivateScope.Privatize(); 5947 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5948 CodeGenDistribute); 5949 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5950 }; 5951 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 5952 emitPostUpdateForReductionClause(CGF, S, 5953 [](CodeGenFunction &) { return nullptr; }); 5954 } 5955 5956 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 5957 CodeGenModule &CGM, StringRef ParentName, 5958 const OMPTargetTeamsDistributeDirective &S) { 5959 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5960 emitTargetTeamsDistributeRegion(CGF, Action, S); 5961 }; 5962 llvm::Function *Fn; 5963 llvm::Constant *Addr; 5964 // Emit target region as a standalone region. 5965 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5966 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5967 assert(Fn && Addr && "Target device function emission failed."); 5968 } 5969 5970 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 5971 const OMPTargetTeamsDistributeDirective &S) { 5972 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5973 emitTargetTeamsDistributeRegion(CGF, Action, S); 5974 }; 5975 emitCommonOMPTargetDirective(*this, S, CodeGen); 5976 } 5977 5978 static void emitTargetTeamsDistributeSimdRegion( 5979 CodeGenFunction &CGF, PrePostActionTy &Action, 5980 const OMPTargetTeamsDistributeSimdDirective &S) { 5981 Action.Enter(CGF); 5982 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5983 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5984 }; 5985 5986 // Emit teams region as a standalone region. 5987 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5988 PrePostActionTy &Action) { 5989 Action.Enter(CGF); 5990 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5991 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5992 (void)PrivateScope.Privatize(); 5993 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5994 CodeGenDistribute); 5995 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5996 }; 5997 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 5998 emitPostUpdateForReductionClause(CGF, S, 5999 [](CodeGenFunction &) { return nullptr; }); 6000 } 6001 6002 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 6003 CodeGenModule &CGM, StringRef ParentName, 6004 const OMPTargetTeamsDistributeSimdDirective &S) { 6005 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6006 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6007 }; 6008 llvm::Function *Fn; 6009 llvm::Constant *Addr; 6010 // Emit target region as a standalone region. 
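  // Note: emitTargetOutlinedFunction outlines CodeGen into Fn and, since
  // IsOffloadEntry is true here, also records the entry so the offloading
  // runtime can associate the host and device versions of the region.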
6011 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6012 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6013 assert(Fn && Addr && "Target device function emission failed."); 6014 } 6015 6016 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 6017 const OMPTargetTeamsDistributeSimdDirective &S) { 6018 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6019 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 6020 }; 6021 emitCommonOMPTargetDirective(*this, S, CodeGen); 6022 } 6023 6024 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 6025 const OMPTeamsDistributeDirective &S) { 6026 6027 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6028 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6029 }; 6030 6031 // Emit teams region as a standalone region. 6032 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6033 PrePostActionTy &Action) { 6034 Action.Enter(CGF); 6035 OMPPrivateScope PrivateScope(CGF); 6036 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6037 (void)PrivateScope.Privatize(); 6038 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6039 CodeGenDistribute); 6040 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6041 }; 6042 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 6043 emitPostUpdateForReductionClause(*this, S, 6044 [](CodeGenFunction &) { return nullptr; }); 6045 } 6046 6047 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 6048 const OMPTeamsDistributeSimdDirective &S) { 6049 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6050 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 6051 }; 6052 6053 // Emit teams region as a standalone region. 6054 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6055 PrePostActionTy &Action) { 6056 Action.Enter(CGF); 6057 OMPPrivateScope PrivateScope(CGF); 6058 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6059 (void)PrivateScope.Privatize(); 6060 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 6061 CodeGenDistribute); 6062 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6063 }; 6064 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 6065 emitPostUpdateForReductionClause(*this, S, 6066 [](CodeGenFunction &) { return nullptr; }); 6067 } 6068 6069 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 6070 const OMPTeamsDistributeParallelForDirective &S) { 6071 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6072 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6073 S.getDistInc()); 6074 }; 6075 6076 // Emit teams region as a standalone region. 
6077 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6078 PrePostActionTy &Action) { 6079 Action.Enter(CGF); 6080 OMPPrivateScope PrivateScope(CGF); 6081 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6082 (void)PrivateScope.Privatize(); 6083 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 6084 CodeGenDistribute); 6085 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6086 }; 6087 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 6088 emitPostUpdateForReductionClause(*this, S, 6089 [](CodeGenFunction &) { return nullptr; }); 6090 } 6091 6092 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 6093 const OMPTeamsDistributeParallelForSimdDirective &S) { 6094 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6095 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6096 S.getDistInc()); 6097 }; 6098 6099 // Emit teams region as a standalone region. 6100 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6101 PrePostActionTy &Action) { 6102 Action.Enter(CGF); 6103 OMPPrivateScope PrivateScope(CGF); 6104 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6105 (void)PrivateScope.Privatize(); 6106 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6107 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6108 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6109 }; 6110 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 6111 CodeGen); 6112 emitPostUpdateForReductionClause(*this, S, 6113 [](CodeGenFunction &) { return nullptr; }); 6114 } 6115 6116 static void emitTargetTeamsDistributeParallelForRegion( 6117 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 6118 PrePostActionTy &Action) { 6119 Action.Enter(CGF); 6120 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6121 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6122 S.getDistInc()); 6123 }; 6124 6125 // Emit teams region as a standalone region. 6126 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6127 PrePostActionTy &Action) { 6128 Action.Enter(CGF); 6129 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6130 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6131 (void)PrivateScope.Privatize(); 6132 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6133 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6134 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6135 }; 6136 6137 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 6138 CodeGenTeams); 6139 emitPostUpdateForReductionClause(CGF, S, 6140 [](CodeGenFunction &) { return nullptr; }); 6141 } 6142 6143 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 6144 CodeGenModule &CGM, StringRef ParentName, 6145 const OMPTargetTeamsDistributeParallelForDirective &S) { 6146 // Emit SPMD target teams distribute parallel for region as a standalone 6147 // region. 6148 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6149 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6150 }; 6151 llvm::Function *Fn; 6152 llvm::Constant *Addr; 6153 // Emit target region as a standalone region. 
6154 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6155 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6156 assert(Fn && Addr && "Target device function emission failed."); 6157 } 6158 6159 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 6160 const OMPTargetTeamsDistributeParallelForDirective &S) { 6161 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6162 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 6163 }; 6164 emitCommonOMPTargetDirective(*this, S, CodeGen); 6165 } 6166 6167 static void emitTargetTeamsDistributeParallelForSimdRegion( 6168 CodeGenFunction &CGF, 6169 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 6170 PrePostActionTy &Action) { 6171 Action.Enter(CGF); 6172 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6173 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 6174 S.getDistInc()); 6175 }; 6176 6177 // Emit teams region as a standalone region. 6178 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 6179 PrePostActionTy &Action) { 6180 Action.Enter(CGF); 6181 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6182 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6183 (void)PrivateScope.Privatize(); 6184 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 6185 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 6186 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 6187 }; 6188 6189 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 6190 CodeGenTeams); 6191 emitPostUpdateForReductionClause(CGF, S, 6192 [](CodeGenFunction &) { return nullptr; }); 6193 } 6194 6195 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 6196 CodeGenModule &CGM, StringRef ParentName, 6197 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 6198 // Emit SPMD target teams distribute parallel for simd region as a standalone 6199 // region. 6200 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6201 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 6202 }; 6203 llvm::Function *Fn; 6204 llvm::Constant *Addr; 6205 // Emit target region as a standalone region. 
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
    const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
                                                   S.getCancelRegion());
}

void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_cancel) {
      IfCond = C->getCondition();
      break;
    }
  }
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    // TODO: This check is necessary as we only generate `omp parallel` through
    // the OpenMPIRBuilder for now.
    if (S.getCancelRegion() == OMPD_parallel) {
      llvm::Value *IfCondition = nullptr;
      if (IfCond)
        IfCondition = EmitScalarExpr(IfCond,
                                     /*IgnoreResultAssign=*/true);
      return Builder.restoreIP(
          OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion()));
    }
  }

  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task ||
      Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
      Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
    return ReturnBlock;
  assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
         Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
         Kind == OMPD_distribute_parallel_for ||
         Kind == OMPD_target_parallel_for ||
         Kind == OMPD_teams_distribute_parallel_for ||
         Kind == OMPD_target_teams_distribute_parallel_for);
  return OMPCancelStack.getExitBlock();
}

void CodeGenFunction::EmitOMPUseDevicePtrClause(
    const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  auto OrigVarIt = C.varlist_begin();
  auto InitIt = C.inits().begin();
  for (const Expr *PvtVarIt : C.private_copies()) {
    const auto *OrigVD =
        cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
    const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
    const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());

    // In order to identify the right initializer, we need to match the
    // declaration used by the mapping logic. In some cases we may get an
    // OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
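      // Illustrative case (hypothetical): for use_device_ptr(p) where 'p' is
      // a field referenced as 'this->p', the captured decl's initializer is
      // that MemberExpr, and the FieldDecl it names is the declaration the
      // mapping logic recorded.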
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
                                                         InitAddrIt, InitVD,
                                                         PvtVD]() {
      // Initialize the temporary initialization variable with the address
      // we get from the runtime library. We have to cast the source address
      // because it is always a void *. References are materialized in the
      // privatization scope, so the initialization here disregards the fact
      // that the original variable is a reference.
      QualType AddrQTy =
          getContext().getPointerType(OrigVD->getType().getNonReferenceType());
      llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
      Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
      setAddrOfLocalVar(InitVD, InitAddr);

      // Emit the private declaration; it will be initialized by the
      // declaration we just added to the local declarations map.
      EmitDecl(*PvtVD);

      // The initialization variable has served its purpose in the emission
      // of the previous declaration, so we don't need it anymore.
      LocalDeclMap.erase(InitVD);

      // Return the address of the private variable.
      return GetAddrOfLocalVar(PvtVD);
    });
    assert(IsRegistered && "firstprivate var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    ++OrigVarIt;
    ++InitIt;
  }
}

static const VarDecl *getBaseDecl(const Expr *Ref) {
  const Expr *Base = Ref->IgnoreParenImpCasts();
  while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
    Base = OASE->getBase()->IgnoreParenImpCasts();
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
    Base = ASE->getBase()->IgnoreParenImpCasts();
  return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
}

void CodeGenFunction::EmitOMPUseDeviceAddrClause(
    const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
    const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
  llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
  for (const Expr *Ref : C.varlists()) {
    const VarDecl *OrigVD = getBaseDecl(Ref);
    if (!Processed.insert(OrigVD).second)
      continue;
    // In order to identify the right initializer, we need to match the
    // declaration used by the mapping logic. In some cases we may get an
    // OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
      const auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    Address PrivAddr = InitAddrIt->getSecond();
    // For declrefs and variable-length arrays, we need to load the pointer
    // for correct mapping, since the pointer to the data was passed to the
    // runtime.
    if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
        MatchingVD->getType()->isArrayType())
      PrivAddr =
          EmitLoadOfPointer(PrivAddr, getContext()
                                          .getPointerType(OrigVD->getType())
                                          ->castAs<PointerType>());
    llvm::Type *RealTy =
        ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
            ->getPointerTo();
    PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);

    (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
  }
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
                                       /*SeparateBeginEndCalls=*/true);

  // Create a pre/post action to signal the privatization of the device
  // pointer. This action can be replaced by the OpenMP runtime code
  // generation to deactivate privatization.
  bool PrivatizeDevicePointers = false;
  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);

  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
      CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
    };

    // Codegen that selects whether to generate the privatization code or not.
    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
                          &InnermostCodeGen](CodeGenFunction &CGF,
                                             PrePostActionTy &Action) {
      RegionCodeGenTy RCG(InnermostCodeGen);
      PrivatizeDevicePointers = false;

      // Call the pre-action to change the status of PrivatizeDevicePointers
      // if needed.
      Action.Enter(CGF);

      if (PrivatizeDevicePointers) {
        OMPPrivateScope PrivateScope(CGF);
        // Emit all instances of the use_device_ptr and use_device_addr
        // clauses.
        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
                                        Info.CaptureDeviceAddrMap);
        for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
          CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
                                         Info.CaptureDeviceAddrMap);
        (void)PrivateScope.Privatize();
        RCG(CGF);
      } else {
        OMPLexicalScope Scope(CGF, S, OMPD_unknown);
        RCG(CGF);
      }
    };

    // Forward the provided action to the privatization codegen.
    RegionCodeGenTy PrivRCG(PrivCodeGen);
    PrivRCG.setAction(Action);

    // Although the body of the region is emitted as an inlined directive, we
    // don't use an inlined scope, because changes to references inside the
    // region are expected to be visible outside, so we do not privatize them.
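    // E.g., for a hypothetical '#pragma omp target data map(tofrom : a)',
    // stores to 'a' inside the region must update the original variable, so
    // no inlined scope is used below.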
6447 OMPLexicalScope Scope(CGF, S); 6448 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data, 6449 PrivRCG); 6450 }; 6451 6452 RegionCodeGenTy RCG(CodeGen); 6453 6454 // If we don't have target devices, don't bother emitting the data mapping 6455 // code. 6456 if (CGM.getLangOpts().OMPTargetTriples.empty()) { 6457 RCG(*this); 6458 return; 6459 } 6460 6461 // Check if we have any if clause associated with the directive. 6462 const Expr *IfCond = nullptr; 6463 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6464 IfCond = C->getCondition(); 6465 6466 // Check if we have any device clause associated with the directive. 6467 const Expr *Device = nullptr; 6468 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6469 Device = C->getDevice(); 6470 6471 // Set the action to signal privatization of device pointers. 6472 RCG.setAction(PrivAction); 6473 6474 // Emit region code. 6475 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 6476 Info); 6477 } 6478 6479 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 6480 const OMPTargetEnterDataDirective &S) { 6481 // If we don't have target devices, don't bother emitting the data mapping 6482 // code. 6483 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6484 return; 6485 6486 // Check if we have any if clause associated with the directive. 6487 const Expr *IfCond = nullptr; 6488 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6489 IfCond = C->getCondition(); 6490 6491 // Check if we have any device clause associated with the directive. 6492 const Expr *Device = nullptr; 6493 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6494 Device = C->getDevice(); 6495 6496 OMPLexicalScope Scope(*this, S, OMPD_task); 6497 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6498 } 6499 6500 void CodeGenFunction::EmitOMPTargetExitDataDirective( 6501 const OMPTargetExitDataDirective &S) { 6502 // If we don't have target devices, don't bother emitting the data mapping 6503 // code. 6504 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6505 return; 6506 6507 // Check if we have any if clause associated with the directive. 6508 const Expr *IfCond = nullptr; 6509 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6510 IfCond = C->getCondition(); 6511 6512 // Check if we have any device clause associated with the directive. 6513 const Expr *Device = nullptr; 6514 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6515 Device = C->getDevice(); 6516 6517 OMPLexicalScope Scope(*this, S, OMPD_task); 6518 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6519 } 6520 6521 static void emitTargetParallelRegion(CodeGenFunction &CGF, 6522 const OMPTargetParallelDirective &S, 6523 PrePostActionTy &Action) { 6524 // Get the captured statement associated with the 'parallel' region. 6525 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6526 Action.Enter(CGF); 6527 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6528 Action.Enter(CGF); 6529 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6530 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6531 CGF.EmitOMPPrivateClause(S, PrivateScope); 6532 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6533 (void)PrivateScope.Privatize(); 6534 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6535 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6536 // TODO: Add support for clauses. 
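    // Sketch: for a hypothetical '#pragma omp target parallel
    // reduction(+ : s)', the reduction init above and the reduction final
    // below bracket the emission of the captured parallel body.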
6537 CGF.EmitStmt(CS->getCapturedStmt()); 6538 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6539 }; 6540 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6541 emitEmptyBoundParameters); 6542 emitPostUpdateForReductionClause(CGF, S, 6543 [](CodeGenFunction &) { return nullptr; }); 6544 } 6545 6546 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6547 CodeGenModule &CGM, StringRef ParentName, 6548 const OMPTargetParallelDirective &S) { 6549 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6550 emitTargetParallelRegion(CGF, S, Action); 6551 }; 6552 llvm::Function *Fn; 6553 llvm::Constant *Addr; 6554 // Emit target region as a standalone region. 6555 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6556 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6557 assert(Fn && Addr && "Target device function emission failed."); 6558 } 6559 6560 void CodeGenFunction::EmitOMPTargetParallelDirective( 6561 const OMPTargetParallelDirective &S) { 6562 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6563 emitTargetParallelRegion(CGF, S, Action); 6564 }; 6565 emitCommonOMPTargetDirective(*this, S, CodeGen); 6566 } 6567 6568 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 6569 const OMPTargetParallelForDirective &S, 6570 PrePostActionTy &Action) { 6571 Action.Enter(CGF); 6572 // Emit directive as a combined directive that consists of two implicit 6573 // directives: 'parallel' with 'for' directive. 6574 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6575 Action.Enter(CGF); 6576 CodeGenFunction::OMPCancelStackRAII CancelRegion( 6577 CGF, OMPD_target_parallel_for, S.hasCancel()); 6578 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6579 emitDispatchForLoopBounds); 6580 }; 6581 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 6582 emitEmptyBoundParameters); 6583 } 6584 6585 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 6586 CodeGenModule &CGM, StringRef ParentName, 6587 const OMPTargetParallelForDirective &S) { 6588 // Emit SPMD target parallel for region as a standalone region. 6589 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6590 emitTargetParallelForRegion(CGF, S, Action); 6591 }; 6592 llvm::Function *Fn; 6593 llvm::Constant *Addr; 6594 // Emit target region as a standalone region. 6595 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6596 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6597 assert(Fn && Addr && "Target device function emission failed."); 6598 } 6599 6600 void CodeGenFunction::EmitOMPTargetParallelForDirective( 6601 const OMPTargetParallelForDirective &S) { 6602 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6603 emitTargetParallelForRegion(CGF, S, Action); 6604 }; 6605 emitCommonOMPTargetDirective(*this, S, CodeGen); 6606 } 6607 6608 static void 6609 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 6610 const OMPTargetParallelForSimdDirective &S, 6611 PrePostActionTy &Action) { 6612 Action.Enter(CGF); 6613 // Emit directive as a combined directive that consists of two implicit 6614 // directives: 'parallel' with 'for' directive. 
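  // For a hypothetical input such as:
  //   #pragma omp target parallel for simd
  //   for (int i = 0; i < n; ++i) a[i] = b[i];
  // the worksharing loop below is emitted inside the outlined parallel
  // region, with the simd aspects handled within EmitOMPWorksharingLoop.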
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
                               emitDispatchForLoopBounds);
  };
  emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
                                 emitEmptyBoundParameters);
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetParallelForSimdDirective &S) {
  // Emit SPMD target parallel for simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
    const OMPTargetParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitTargetParallelForSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

/// Map the original declaration of a loop helper variable to the captured
/// parameter that carries its value, by registering the parameter's address
/// as the private copy of the declaration.
static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
                     const ImplicitParamDecl *PVD,
                     CodeGenFunction::OMPPrivateScope &Privates) {
  const auto *VDecl = cast<VarDecl>(Helper->getDecl());
  Privates.addPrivate(VDecl,
                      [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
}

void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
  assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
  Address CapturedStruct = Address::invalid();
  {
    OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
    CapturedStruct = GenerateCapturedStmtArgument(*CS);
  }
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_taskloop) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if taskloop must be emitted without taskgroup.
  Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
  // TODO: Check if we should emit tied or untied task.
  Data.Tied = true;
  // Set the scheduling for the taskloop.
  if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
    // grainsize clause
    Data.Schedule.setInt(/*IntVal=*/false);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
  } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
    // num_tasks clause
    Data.Schedule.setInt(/*IntVal=*/true);
    Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
  }

  auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
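    // Roughly, the generated structure is (a sketch):
    //   if (precond) {        ; block "taskloop.if.then"
    //     ... taskloop body ...
    //   }
    //   ...                   ; block "taskloop.if.end"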
6699 // If the condition constant folds and can be elided, avoid emitting the 6700 // whole loop. 6701 bool CondConstant; 6702 llvm::BasicBlock *ContBlock = nullptr; 6703 OMPLoopScope PreInitScope(CGF, S); 6704 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 6705 if (!CondConstant) 6706 return; 6707 } else { 6708 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 6709 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 6710 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 6711 CGF.getProfileCount(&S)); 6712 CGF.EmitBlock(ThenBlock); 6713 CGF.incrementProfileCounter(&S); 6714 } 6715 6716 (void)CGF.EmitOMPLinearClauseInit(S); 6717 6718 OMPPrivateScope LoopScope(CGF); 6719 // Emit helper vars inits. 6720 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6721 auto *I = CS->getCapturedDecl()->param_begin(); 6722 auto *LBP = std::next(I, LowerBound); 6723 auto *UBP = std::next(I, UpperBound); 6724 auto *STP = std::next(I, Stride); 6725 auto *LIP = std::next(I, LastIter); 6726 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6727 LoopScope); 6728 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6729 LoopScope); 6730 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6731 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6732 LoopScope); 6733 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6734 CGF.EmitOMPLinearClause(S, LoopScope); 6735 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6736 (void)LoopScope.Privatize(); 6737 // Emit the loop iteration variable. 6738 const Expr *IVExpr = S.getIterationVariable(); 6739 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6740 CGF.EmitVarDecl(*IVDecl); 6741 CGF.EmitIgnoredExpr(S.getInit()); 6742 6743 // Emit the iterations count variable. 6744 // If it is not a variable, Sema decided to calculate iterations count on 6745 // each iteration (e.g., it is foldable into a constant). 6746 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6747 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6748 // Emit calculation of the iterations count. 6749 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6750 } 6751 6752 { 6753 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6754 emitCommonSimdLoop( 6755 CGF, S, 6756 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6757 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6758 CGF.EmitOMPSimdInit(S); 6759 }, 6760 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6761 CGF.EmitOMPInnerLoop( 6762 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6763 [&S](CodeGenFunction &CGF) { 6764 emitOMPLoopBodyWithStopPoint(CGF, S, 6765 CodeGenFunction::JumpDest()); 6766 }, 6767 [](CodeGenFunction &) {}); 6768 }); 6769 } 6770 // Emit: if (PreCond) - end. 6771 if (ContBlock) { 6772 CGF.EmitBranch(ContBlock); 6773 CGF.EmitBlock(ContBlock, true); 6774 } 6775 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
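    // The last-iteration flag is read back from the captured parameter
    // (*LIP below); a nonzero value means this task ran the final iteration
    // and must propagate the lastprivate values.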
6776 if (HasLastprivateClause) { 6777 CGF.EmitOMPLastprivateClauseFinal( 6778 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6779 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6780 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6781 (*LIP)->getType(), S.getBeginLoc()))); 6782 } 6783 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6784 return CGF.Builder.CreateIsNotNull( 6785 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6786 (*LIP)->getType(), S.getBeginLoc())); 6787 }); 6788 }; 6789 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6790 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6791 const OMPTaskDataTy &Data) { 6792 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6793 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6794 OMPLoopScope PreInitScope(CGF, S); 6795 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6796 OutlinedFn, SharedsTy, 6797 CapturedStruct, IfCond, Data); 6798 }; 6799 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6800 CodeGen); 6801 }; 6802 if (Data.Nogroup) { 6803 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6804 } else { 6805 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6806 *this, 6807 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6808 PrePostActionTy &Action) { 6809 Action.Enter(CGF); 6810 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6811 Data); 6812 }, 6813 S.getBeginLoc()); 6814 } 6815 } 6816 6817 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6818 auto LPCRegion = 6819 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6820 EmitOMPTaskLoopBasedDirective(S); 6821 } 6822 6823 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6824 const OMPTaskLoopSimdDirective &S) { 6825 auto LPCRegion = 6826 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6827 OMPLexicalScope Scope(*this, S); 6828 EmitOMPTaskLoopBasedDirective(S); 6829 } 6830 6831 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6832 const OMPMasterTaskLoopDirective &S) { 6833 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6834 Action.Enter(CGF); 6835 EmitOMPTaskLoopBasedDirective(S); 6836 }; 6837 auto LPCRegion = 6838 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6839 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 6840 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6841 } 6842 6843 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 6844 const OMPMasterTaskLoopSimdDirective &S) { 6845 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6846 Action.Enter(CGF); 6847 EmitOMPTaskLoopBasedDirective(S); 6848 }; 6849 auto LPCRegion = 6850 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6851 OMPLexicalScope Scope(*this, S); 6852 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6853 } 6854 6855 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 6856 const OMPParallelMasterTaskLoopDirective &S) { 6857 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6858 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6859 PrePostActionTy &Action) { 6860 Action.Enter(CGF); 6861 CGF.EmitOMPTaskLoopBasedDirective(S); 6862 }; 6863 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6864 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6865 S.getBeginLoc()); 
6866 }; 6867 auto LPCRegion = 6868 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6869 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 6870 emitEmptyBoundParameters); 6871 } 6872 6873 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 6874 const OMPParallelMasterTaskLoopSimdDirective &S) { 6875 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6876 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6877 PrePostActionTy &Action) { 6878 Action.Enter(CGF); 6879 CGF.EmitOMPTaskLoopBasedDirective(S); 6880 }; 6881 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6882 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6883 S.getBeginLoc()); 6884 }; 6885 auto LPCRegion = 6886 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6887 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 6888 emitEmptyBoundParameters); 6889 } 6890 6891 // Generate the instructions for '#pragma omp target update' directive. 6892 void CodeGenFunction::EmitOMPTargetUpdateDirective( 6893 const OMPTargetUpdateDirective &S) { 6894 // If we don't have target devices, don't bother emitting the data mapping 6895 // code. 6896 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6897 return; 6898 6899 // Check if we have any if clause associated with the directive. 6900 const Expr *IfCond = nullptr; 6901 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6902 IfCond = C->getCondition(); 6903 6904 // Check if we have any device clause associated with the directive. 6905 const Expr *Device = nullptr; 6906 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6907 Device = C->getDevice(); 6908 6909 OMPLexicalScope Scope(*this, S, OMPD_task); 6910 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6911 } 6912 6913 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 6914 const OMPExecutableDirective &D) { 6915 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 6916 EmitOMPScanDirective(*SD); 6917 return; 6918 } 6919 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 6920 return; 6921 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 6922 OMPPrivateScope GlobalsScope(CGF); 6923 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 6924 // Capture global firstprivates to avoid crash. 
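      // E.g. (hypothetical): firstprivate(g) on a taskloop where 'g' has
      // static storage; pre-registering its address here lets the outlined
      // task body find it even though 'g' never enters LocalDeclMap via a
      // local declaration.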
      for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
        for (const Expr *Ref : C->varlists()) {
          // Use dyn_cast here: the reference is not guaranteed to be a plain
          // DeclRefExpr, and non-DeclRefExpr items are simply skipped.
          const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
          if (!DRE)
            continue;
          const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
          if (!VD || VD->hasLocalStorage())
            continue;
          if (!CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(Ref);
            GlobalsScope.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
        }
      }
    }
    if (isOpenMPSimdDirective(D.getDirectiveKind())) {
      (void)GlobalsScope.Privatize();
      ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
      emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
    } else {
      if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
        for (const Expr *E : LD->counters()) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
          if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
            LValue GlobLVal = CGF.EmitLValue(E);
            GlobalsScope.addPrivate(
                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
          }
          if (isa<OMPCapturedExprDecl>(VD)) {
            // Emit only those that were not explicitly referenced in clauses.
            if (!CGF.LocalDeclMap.count(VD))
              CGF.EmitVarDecl(*VD);
          }
        }
        for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
          if (!C->getNumForLoops())
            continue;
          for (unsigned I = LD->getLoopsNumber(),
                        E = C->getLoopNumIterations().size();
               I < E; ++I) {
            if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
                    cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
              // Emit only those that were not explicitly referenced in
              // clauses.
              if (!CGF.LocalDeclMap.count(VD))
                CGF.EmitVarDecl(*VD);
            }
          }
        }
      }
      (void)GlobalsScope.Privatize();
      CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
    }
  };
  if (D.getDirectiveKind() == OMPD_atomic ||
      D.getDirectiveKind() == OMPD_critical ||
      D.getDirectiveKind() == OMPD_section ||
      D.getDirectiveKind() == OMPD_master ||
      D.getDirectiveKind() == OMPD_masked) {
    EmitStmt(D.getAssociatedStmt());
  } else {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
    OMPSimdLexicalScope Scope(*this, D);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        *this,
        isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
                                                    : D.getDirectiveKind(),
        CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, D);
}