//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
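    // All captured variables referenced by the inlined region are registered
    // above; Privatize() remaps further uses of them to the captured
    // addresses.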
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for OpenMP parallel construct that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                          CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit init, __range and __end variables for C++ range loops.
159 const Stmt *Body = 160 S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 161 for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) { 162 Body = OMPLoopDirective::tryToFindNextInnerLoop( 163 Body, /*TryImperfectlyNestedLoops=*/true); 164 if (auto *For = dyn_cast<ForStmt>(Body)) { 165 Body = For->getBody(); 166 } else { 167 assert(isa<CXXForRangeStmt>(Body) && 168 "Expected canonical for loop or range-based for loop."); 169 auto *CXXFor = cast<CXXForRangeStmt>(Body); 170 if (const Stmt *Init = CXXFor->getInit()) 171 CGF.EmitStmt(Init); 172 CGF.EmitStmt(CXXFor->getRangeStmt()); 173 CGF.EmitStmt(CXXFor->getEndStmt()); 174 Body = CXXFor->getBody(); 175 } 176 } 177 if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) { 178 for (const auto *I : PreInits->decls()) 179 CGF.EmitVarDecl(cast<VarDecl>(*I)); 180 } 181 PreCondVars.restore(CGF); 182 } 183 184 public: 185 OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S) 186 : CodeGenFunction::RunCleanupsScope(CGF) { 187 emitPreInitStmt(CGF, S); 188 } 189 }; 190 191 class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope { 192 CodeGenFunction::OMPPrivateScope InlinedShareds; 193 194 static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) { 195 return CGF.LambdaCaptureFields.lookup(VD) || 196 (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) || 197 (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) && 198 cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD)); 199 } 200 201 public: 202 OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S) 203 : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()), 204 InlinedShareds(CGF) { 205 for (const auto *C : S.clauses()) { 206 if (const auto *CPI = OMPClauseWithPreInit::get(C)) { 207 if (const auto *PreInit = 208 cast_or_null<DeclStmt>(CPI->getPreInitStmt())) { 209 for (const auto *I : PreInit->decls()) { 210 if (!I->hasAttr<OMPCaptureNoInitAttr>()) { 211 CGF.EmitVarDecl(cast<VarDecl>(*I)); 212 } else { 213 CodeGenFunction::AutoVarEmission Emission = 214 CGF.EmitAutoVarAlloca(cast<VarDecl>(*I)); 215 CGF.EmitAutoVarCleanups(Emission); 216 } 217 } 218 } 219 } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) { 220 for (const Expr *E : UDP->varlists()) { 221 const Decl *D = cast<DeclRefExpr>(E)->getDecl(); 222 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D)) 223 CGF.EmitVarDecl(*OED); 224 } 225 } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) { 226 for (const Expr *E : UDP->varlists()) { 227 const Decl *D = getBaseDecl(E); 228 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D)) 229 CGF.EmitVarDecl(*OED); 230 } 231 } 232 } 233 if (!isOpenMPSimdDirective(S.getDirectiveKind())) 234 CGF.EmitOMPPrivateClause(S, InlinedShareds); 235 if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) { 236 if (const Expr *E = TG->getReductionRef()) 237 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())); 238 } 239 const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt()); 240 while (CS) { 241 for (auto &C : CS->captures()) { 242 if (C.capturesVariable() || C.capturesVariableByCopy()) { 243 auto *VD = C.getCapturedVar(); 244 assert(VD == VD->getCanonicalDecl() && 245 "Canonical decl must be captured."); 246 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD), 247 isCapturedVar(CGF, VD) || 248 (CGF.CapturedStmtInfo && 249 InlinedShareds.isGlobalVarCaptured(VD)), 250 VD->getType().getNonReferenceType(), VK_LValue, 251 C.getLocation()); 252 
InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address { 253 return CGF.EmitLValue(&DRE).getAddress(CGF); 254 }); 255 } 256 } 257 CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt()); 258 } 259 (void)InlinedShareds.Privatize(); 260 } 261 }; 262 263 } // namespace 264 265 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 266 const OMPExecutableDirective &S, 267 const RegionCodeGenTy &CodeGen); 268 269 LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) { 270 if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) { 271 if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) { 272 OrigVD = OrigVD->getCanonicalDecl(); 273 bool IsCaptured = 274 LambdaCaptureFields.lookup(OrigVD) || 275 (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) || 276 (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)); 277 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured, 278 OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc()); 279 return EmitLValue(&DRE); 280 } 281 } 282 return EmitLValue(E); 283 } 284 285 llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) { 286 ASTContext &C = getContext(); 287 llvm::Value *Size = nullptr; 288 auto SizeInChars = C.getTypeSizeInChars(Ty); 289 if (SizeInChars.isZero()) { 290 // getTypeSizeInChars() returns 0 for a VLA. 291 while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) { 292 VlaSizePair VlaSize = getVLASize(VAT); 293 Ty = VlaSize.Type; 294 Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) 295 : VlaSize.NumElts; 296 } 297 SizeInChars = C.getTypeSizeInChars(Ty); 298 if (SizeInChars.isZero()) 299 return llvm::ConstantInt::get(SizeTy, /*V=*/0); 300 return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars)); 301 } 302 return CGM.getSize(SizeInChars); 303 } 304 305 void CodeGenFunction::GenerateOpenMPCapturedVars( 306 const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) { 307 const RecordDecl *RD = S.getCapturedRecordDecl(); 308 auto CurField = RD->field_begin(); 309 auto CurCap = S.captures().begin(); 310 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(), 311 E = S.capture_init_end(); 312 I != E; ++I, ++CurField, ++CurCap) { 313 if (CurField->hasCapturedVLAType()) { 314 const VariableArrayType *VAT = CurField->getCapturedVLAType(); 315 llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()]; 316 CapturedVars.push_back(Val); 317 } else if (CurCap->capturesThis()) { 318 CapturedVars.push_back(CXXThisValue); 319 } else if (CurCap->capturesVariableByCopy()) { 320 llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation()); 321 322 // If the field is not a pointer, we need to save the actual value 323 // and load it as a void pointer. 324 if (!CurField->getType()->isAnyPointerType()) { 325 ASTContext &Ctx = getContext(); 326 Address DstAddr = CreateMemTemp( 327 Ctx.getUIntPtrType(), 328 Twine(CurCap->getCapturedVar()->getName(), ".casted")); 329 LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType()); 330 331 llvm::Value *SrcAddrVal = EmitScalarConversion( 332 DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()), 333 Ctx.getPointerType(CurField->getType()), CurCap->getLocation()); 334 LValue SrcLV = 335 MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType()); 336 337 // Store the value using the source type pointer. 338 EmitStoreThroughLValue(RValue::get(CV), SrcLV); 339 340 // Load the value using the destination type pointer. 
341 CV = EmitLoadOfScalar(DstLV, CurCap->getLocation()); 342 } 343 CapturedVars.push_back(CV); 344 } else { 345 assert(CurCap->capturesVariable() && "Expected capture by reference."); 346 CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer()); 347 } 348 } 349 } 350 351 static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc, 352 QualType DstType, StringRef Name, 353 LValue AddrLV) { 354 ASTContext &Ctx = CGF.getContext(); 355 356 llvm::Value *CastedPtr = CGF.EmitScalarConversion( 357 AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(), 358 Ctx.getPointerType(DstType), Loc); 359 Address TmpAddr = 360 CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType)) 361 .getAddress(CGF); 362 return TmpAddr; 363 } 364 365 static QualType getCanonicalParamType(ASTContext &C, QualType T) { 366 if (T->isLValueReferenceType()) 367 return C.getLValueReferenceType( 368 getCanonicalParamType(C, T.getNonReferenceType()), 369 /*SpelledAsLValue=*/false); 370 if (T->isPointerType()) 371 return C.getPointerType(getCanonicalParamType(C, T->getPointeeType())); 372 if (const ArrayType *A = T->getAsArrayTypeUnsafe()) { 373 if (const auto *VLA = dyn_cast<VariableArrayType>(A)) 374 return getCanonicalParamType(C, VLA->getElementType()); 375 if (!A->isVariablyModifiedType()) 376 return C.getCanonicalType(T); 377 } 378 return C.getCanonicalParamType(T); 379 } 380 381 namespace { 382 /// Contains required data for proper outlined function codegen. 383 struct FunctionOptions { 384 /// Captured statement for which the function is generated. 385 const CapturedStmt *S = nullptr; 386 /// true if cast to/from UIntPtr is required for variables captured by 387 /// value. 388 const bool UIntPtrCastRequired = true; 389 /// true if only casted arguments must be registered as local args or VLA 390 /// sizes. 391 const bool RegisterCastedArgsOnly = false; 392 /// Name of the generated function. 393 const StringRef FunctionName; 394 /// Location of the non-debug version of the outlined function. 395 SourceLocation Loc; 396 explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired, 397 bool RegisterCastedArgsOnly, StringRef FunctionName, 398 SourceLocation Loc) 399 : S(S), UIntPtrCastRequired(UIntPtrCastRequired), 400 RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly), 401 FunctionName(FunctionName), Loc(Loc) {} 402 }; 403 } // namespace 404 405 static llvm::Function *emitOutlinedFunctionPrologue( 406 CodeGenFunction &CGF, FunctionArgList &Args, 407 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> 408 &LocalAddrs, 409 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> 410 &VLASizes, 411 llvm::Value *&CXXThisValue, const FunctionOptions &FO) { 412 const CapturedDecl *CD = FO.S->getCapturedDecl(); 413 const RecordDecl *RD = FO.S->getCapturedRecordDecl(); 414 assert(CD->hasBody() && "missing CapturedDecl body"); 415 416 CXXThisValue = nullptr; 417 // Build the argument list. 
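  // Arguments mirror the CapturedDecl parameters: the parameters before the
  // context parameter are kept, one argument is added per captured field in
  // place of the context parameter, and the remaining parameters follow.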
418 CodeGenModule &CGM = CGF.CGM; 419 ASTContext &Ctx = CGM.getContext(); 420 FunctionArgList TargetArgs; 421 Args.append(CD->param_begin(), 422 std::next(CD->param_begin(), CD->getContextParamPosition())); 423 TargetArgs.append( 424 CD->param_begin(), 425 std::next(CD->param_begin(), CD->getContextParamPosition())); 426 auto I = FO.S->captures().begin(); 427 FunctionDecl *DebugFunctionDecl = nullptr; 428 if (!FO.UIntPtrCastRequired) { 429 FunctionProtoType::ExtProtoInfo EPI; 430 QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI); 431 DebugFunctionDecl = FunctionDecl::Create( 432 Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(), 433 SourceLocation(), DeclarationName(), FunctionTy, 434 Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static, 435 /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false); 436 } 437 for (const FieldDecl *FD : RD->fields()) { 438 QualType ArgType = FD->getType(); 439 IdentifierInfo *II = nullptr; 440 VarDecl *CapVar = nullptr; 441 442 // If this is a capture by copy and the type is not a pointer, the outlined 443 // function argument type should be uintptr and the value properly casted to 444 // uintptr. This is necessary given that the runtime library is only able to 445 // deal with pointers. We can pass in the same way the VLA type sizes to the 446 // outlined function. 447 if (FO.UIntPtrCastRequired && 448 ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) || 449 I->capturesVariableArrayType())) 450 ArgType = Ctx.getUIntPtrType(); 451 452 if (I->capturesVariable() || I->capturesVariableByCopy()) { 453 CapVar = I->getCapturedVar(); 454 II = CapVar->getIdentifier(); 455 } else if (I->capturesThis()) { 456 II = &Ctx.Idents.get("this"); 457 } else { 458 assert(I->capturesVariableArrayType()); 459 II = &Ctx.Idents.get("vla"); 460 } 461 if (ArgType->isVariablyModifiedType()) 462 ArgType = getCanonicalParamType(Ctx, ArgType); 463 VarDecl *Arg; 464 if (DebugFunctionDecl && (CapVar || I->capturesThis())) { 465 Arg = ParmVarDecl::Create( 466 Ctx, DebugFunctionDecl, 467 CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(), 468 CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType, 469 /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr); 470 } else { 471 Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(), 472 II, ArgType, ImplicitParamDecl::Other); 473 } 474 Args.emplace_back(Arg); 475 // Do not cast arguments if we emit function with non-original types. 476 TargetArgs.emplace_back( 477 FO.UIntPtrCastRequired 478 ? Arg 479 : CGM.getOpenMPRuntime().translateParameter(FD, Arg)); 480 ++I; 481 } 482 Args.append( 483 std::next(CD->param_begin(), CD->getContextParamPosition() + 1), 484 CD->param_end()); 485 TargetArgs.append( 486 std::next(CD->param_begin(), CD->getContextParamPosition() + 1), 487 CD->param_end()); 488 489 // Create the function declaration. 490 const CGFunctionInfo &FuncInfo = 491 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs); 492 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); 493 494 auto *F = 495 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, 496 FO.FunctionName, &CGM.getModule()); 497 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); 498 if (CD->isNothrow()) 499 F->setDoesNotThrow(); 500 F->setDoesNotRecurse(); 501 502 // Generate the function. 503 CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs, 504 FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(), 505 FO.UIntPtrCastRequired ? 
FO.Loc 506 : CD->getBody()->getBeginLoc()); 507 unsigned Cnt = CD->getContextParamPosition(); 508 I = FO.S->captures().begin(); 509 for (const FieldDecl *FD : RD->fields()) { 510 // Do not map arguments if we emit function with non-original types. 511 Address LocalAddr(Address::invalid()); 512 if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) { 513 LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt], 514 TargetArgs[Cnt]); 515 } else { 516 LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]); 517 } 518 // If we are capturing a pointer by copy we don't need to do anything, just 519 // use the value that we get from the arguments. 520 if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) { 521 const VarDecl *CurVD = I->getCapturedVar(); 522 if (!FO.RegisterCastedArgsOnly) 523 LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}}); 524 ++Cnt; 525 ++I; 526 continue; 527 } 528 529 LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(), 530 AlignmentSource::Decl); 531 if (FD->hasCapturedVLAType()) { 532 if (FO.UIntPtrCastRequired) { 533 ArgLVal = CGF.MakeAddrLValue( 534 castValueFromUintptr(CGF, I->getLocation(), FD->getType(), 535 Args[Cnt]->getName(), ArgLVal), 536 FD->getType(), AlignmentSource::Decl); 537 } 538 llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation()); 539 const VariableArrayType *VAT = FD->getCapturedVLAType(); 540 VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg); 541 } else if (I->capturesVariable()) { 542 const VarDecl *Var = I->getCapturedVar(); 543 QualType VarTy = Var->getType(); 544 Address ArgAddr = ArgLVal.getAddress(CGF); 545 if (ArgLVal.getType()->isLValueReferenceType()) { 546 ArgAddr = CGF.EmitLoadOfReference(ArgLVal); 547 } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) { 548 assert(ArgLVal.getType()->isPointerType()); 549 ArgAddr = CGF.EmitLoadOfPointer( 550 ArgAddr, ArgLVal.getType()->castAs<PointerType>()); 551 } 552 if (!FO.RegisterCastedArgsOnly) { 553 LocalAddrs.insert( 554 {Args[Cnt], 555 {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}}); 556 } 557 } else if (I->capturesVariableByCopy()) { 558 assert(!FD->getType()->isAnyPointerType() && 559 "Not expecting a captured pointer."); 560 const VarDecl *Var = I->getCapturedVar(); 561 LocalAddrs.insert({Args[Cnt], 562 {Var, FO.UIntPtrCastRequired 563 ? castValueFromUintptr( 564 CGF, I->getLocation(), FD->getType(), 565 Args[Cnt]->getName(), ArgLVal) 566 : ArgLVal.getAddress(CGF)}}); 567 } else { 568 // If 'this' is captured, load it into CXXThisValue. 569 assert(I->capturesThis()); 570 CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation()); 571 LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}}); 572 } 573 ++Cnt; 574 ++I; 575 } 576 577 return F; 578 } 579 580 llvm::Function * 581 CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, 582 SourceLocation Loc) { 583 assert( 584 CapturedStmtInfo && 585 "CapturedStmtInfo should be set when generating the captured function"); 586 const CapturedDecl *CD = S.getCapturedDecl(); 587 // Build the argument list. 
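  // When debug info is being generated, the body is emitted into a
  // "_debug__" variant that keeps the original parameter types; a wrapper
  // with the uintptr-cast signature is emitted afterwards and forwards to it.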
588 bool NeedWrapperFunction = 589 getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo(); 590 FunctionArgList Args; 591 llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs; 592 llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes; 593 SmallString<256> Buffer; 594 llvm::raw_svector_ostream Out(Buffer); 595 Out << CapturedStmtInfo->getHelperName(); 596 if (NeedWrapperFunction) 597 Out << "_debug__"; 598 FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false, 599 Out.str(), Loc); 600 llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs, 601 VLASizes, CXXThisValue, FO); 602 CodeGenFunction::OMPPrivateScope LocalScope(*this); 603 for (const auto &LocalAddrPair : LocalAddrs) { 604 if (LocalAddrPair.second.first) { 605 LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() { 606 return LocalAddrPair.second.second; 607 }); 608 } 609 } 610 (void)LocalScope.Privatize(); 611 for (const auto &VLASizePair : VLASizes) 612 VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second; 613 PGO.assignRegionCounters(GlobalDecl(CD), F); 614 CapturedStmtInfo->EmitBody(*this, CD->getBody()); 615 (void)LocalScope.ForceCleanup(); 616 FinishFunction(CD->getBodyRBrace()); 617 if (!NeedWrapperFunction) 618 return F; 619 620 FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true, 621 /*RegisterCastedArgsOnly=*/true, 622 CapturedStmtInfo->getHelperName(), Loc); 623 CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true); 624 WrapperCGF.CapturedStmtInfo = CapturedStmtInfo; 625 Args.clear(); 626 LocalAddrs.clear(); 627 VLASizes.clear(); 628 llvm::Function *WrapperF = 629 emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes, 630 WrapperCGF.CXXThisValue, WrapperFO); 631 llvm::SmallVector<llvm::Value *, 4> CallArgs; 632 for (const auto *Arg : Args) { 633 llvm::Value *CallArg; 634 auto I = LocalAddrs.find(Arg); 635 if (I != LocalAddrs.end()) { 636 LValue LV = WrapperCGF.MakeAddrLValue( 637 I->second.second, 638 I->second.first ? I->second.first->getType() : Arg->getType(), 639 AlignmentSource::Decl); 640 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc()); 641 } else { 642 auto EI = VLASizes.find(Arg); 643 if (EI != VLASizes.end()) { 644 CallArg = EI->second.second; 645 } else { 646 LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg), 647 Arg->getType(), 648 AlignmentSource::Decl); 649 CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc()); 650 } 651 } 652 CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType())); 653 } 654 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs); 655 WrapperCGF.FinishFunction(); 656 return WrapperF; 657 } 658 659 //===----------------------------------------------------------------------===// 660 // OpenMP Directive Emission 661 //===----------------------------------------------------------------------===// 662 void CodeGenFunction::EmitOMPAggregateAssign( 663 Address DestAddr, Address SrcAddr, QualType OriginalType, 664 const llvm::function_ref<void(Address, Address)> CopyGen) { 665 // Perform element-by-element initialization. 666 QualType ElementTy; 667 668 // Drill down to the base element type on both arrays. 
669 const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe(); 670 llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr); 671 SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType()); 672 673 llvm::Value *SrcBegin = SrcAddr.getPointer(); 674 llvm::Value *DestBegin = DestAddr.getPointer(); 675 // Cast from pointer to array type to pointer to single element. 676 llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements); 677 // The basic structure here is a while-do loop. 678 llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body"); 679 llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done"); 680 llvm::Value *IsEmpty = 681 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty"); 682 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); 683 684 // Enter the loop body, making that address the current address. 685 llvm::BasicBlock *EntryBB = Builder.GetInsertBlock(); 686 EmitBlock(BodyBB); 687 688 CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy); 689 690 llvm::PHINode *SrcElementPHI = 691 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast"); 692 SrcElementPHI->addIncoming(SrcBegin, EntryBB); 693 Address SrcElementCurrent = 694 Address(SrcElementPHI, 695 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize)); 696 697 llvm::PHINode *DestElementPHI = 698 Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast"); 699 DestElementPHI->addIncoming(DestBegin, EntryBB); 700 Address DestElementCurrent = 701 Address(DestElementPHI, 702 DestAddr.getAlignment().alignmentOfArrayElement(ElementSize)); 703 704 // Emit copy. 705 CopyGen(DestElementCurrent, SrcElementCurrent); 706 707 // Shift the address forward by one element. 708 llvm::Value *DestElementNext = Builder.CreateConstGEP1_32( 709 DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element"); 710 llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32( 711 SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element"); 712 // Check whether we've reached the end. 713 llvm::Value *Done = 714 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done"); 715 Builder.CreateCondBr(Done, DoneBB, BodyBB); 716 DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock()); 717 SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock()); 718 719 // Done. 720 EmitBlock(DoneBB, /*IsFinished=*/true); 721 } 722 723 void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr, 724 Address SrcAddr, const VarDecl *DestVD, 725 const VarDecl *SrcVD, const Expr *Copy) { 726 if (OriginalType->isArrayType()) { 727 const auto *BO = dyn_cast<BinaryOperator>(Copy); 728 if (BO && BO->getOpcode() == BO_Assign) { 729 // Perform simple memcpy for simple copying. 730 LValue Dest = MakeAddrLValue(DestAddr, OriginalType); 731 LValue Src = MakeAddrLValue(SrcAddr, OriginalType); 732 EmitAggregateAssign(Dest, Src, OriginalType); 733 } else { 734 // For arrays with complex element types perform element by element 735 // copying. 736 EmitOMPAggregateAssign( 737 DestAddr, SrcAddr, OriginalType, 738 [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) { 739 // Working with the single array element, so have to remap 740 // destination and source variables to corresponding array 741 // elements. 
742 CodeGenFunction::OMPPrivateScope Remap(*this); 743 Remap.addPrivate(DestVD, [DestElement]() { return DestElement; }); 744 Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; }); 745 (void)Remap.Privatize(); 746 EmitIgnoredExpr(Copy); 747 }); 748 } 749 } else { 750 // Remap pseudo source variable to private copy. 751 CodeGenFunction::OMPPrivateScope Remap(*this); 752 Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; }); 753 Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; }); 754 (void)Remap.Privatize(); 755 // Emit copying of the whole variable. 756 EmitIgnoredExpr(Copy); 757 } 758 } 759 760 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D, 761 OMPPrivateScope &PrivateScope) { 762 if (!HaveInsertPoint()) 763 return false; 764 bool DeviceConstTarget = 765 getLangOpts().OpenMPIsDevice && 766 isOpenMPTargetExecutionDirective(D.getDirectiveKind()); 767 bool FirstprivateIsLastprivate = false; 768 llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates; 769 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) { 770 for (const auto *D : C->varlists()) 771 Lastprivates.try_emplace( 772 cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(), 773 C->getKind()); 774 } 775 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate; 776 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; 777 getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind()); 778 // Force emission of the firstprivate copy if the directive does not emit 779 // outlined function, like omp for, omp simd, omp distribute etc. 780 bool MustEmitFirstprivateCopy = 781 CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown; 782 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) { 783 const auto *IRef = C->varlist_begin(); 784 const auto *InitsRef = C->inits().begin(); 785 for (const Expr *IInit : C->private_copies()) { 786 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 787 bool ThisFirstprivateIsLastprivate = 788 Lastprivates.count(OrigVD->getCanonicalDecl()) > 0; 789 const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD); 790 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 791 if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD && 792 !FD->getType()->isReferenceType() && 793 (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) { 794 EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()); 795 ++IRef; 796 ++InitsRef; 797 continue; 798 } 799 // Do not emit copy for firstprivate constant variables in target regions, 800 // captured by reference. 801 if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) && 802 FD && FD->getType()->isReferenceType() && 803 (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) { 804 (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this, 805 OrigVD); 806 ++IRef; 807 ++InitsRef; 808 continue; 809 } 810 FirstprivateIsLastprivate = 811 FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate; 812 if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) { 813 const auto *VDInit = 814 cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl()); 815 bool IsRegistered; 816 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 817 /*RefersToEnclosingVariableOrCapture=*/FD != nullptr, 818 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 819 LValue OriginalLVal; 820 if (!FD) { 821 // Check if the firstprivate variable is just a constant value. 
822 ConstantEmission CE = tryEmitAsConstant(&DRE); 823 if (CE && !CE.isReference()) { 824 // Constant value, no need to create a copy. 825 ++IRef; 826 ++InitsRef; 827 continue; 828 } 829 if (CE && CE.isReference()) { 830 OriginalLVal = CE.getReferenceLValue(*this, &DRE); 831 } else { 832 assert(!CE && "Expected non-constant firstprivate."); 833 OriginalLVal = EmitLValue(&DRE); 834 } 835 } else { 836 OriginalLVal = EmitLValue(&DRE); 837 } 838 QualType Type = VD->getType(); 839 if (Type->isArrayType()) { 840 // Emit VarDecl with copy init for arrays. 841 // Get the address of the original variable captured in current 842 // captured region. 843 IsRegistered = PrivateScope.addPrivate( 844 OrigVD, [this, VD, Type, OriginalLVal, VDInit]() { 845 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 846 const Expr *Init = VD->getInit(); 847 if (!isa<CXXConstructExpr>(Init) || 848 isTrivialInitializer(Init)) { 849 // Perform simple memcpy. 850 LValue Dest = 851 MakeAddrLValue(Emission.getAllocatedAddress(), Type); 852 EmitAggregateAssign(Dest, OriginalLVal, Type); 853 } else { 854 EmitOMPAggregateAssign( 855 Emission.getAllocatedAddress(), 856 OriginalLVal.getAddress(*this), Type, 857 [this, VDInit, Init](Address DestElement, 858 Address SrcElement) { 859 // Clean up any temporaries needed by the 860 // initialization. 861 RunCleanupsScope InitScope(*this); 862 // Emit initialization for single element. 863 setAddrOfLocalVar(VDInit, SrcElement); 864 EmitAnyExprToMem(Init, DestElement, 865 Init->getType().getQualifiers(), 866 /*IsInitializer*/ false); 867 LocalDeclMap.erase(VDInit); 868 }); 869 } 870 EmitAutoVarCleanups(Emission); 871 return Emission.getAllocatedAddress(); 872 }); 873 } else { 874 Address OriginalAddr = OriginalLVal.getAddress(*this); 875 IsRegistered = 876 PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD, 877 ThisFirstprivateIsLastprivate, 878 OrigVD, &Lastprivates, IRef]() { 879 // Emit private VarDecl with copy init. 880 // Remap temp VDInit variable to the address of the original 881 // variable (for proper handling of captured global variables). 882 setAddrOfLocalVar(VDInit, OriginalAddr); 883 EmitDecl(*VD); 884 LocalDeclMap.erase(VDInit); 885 if (ThisFirstprivateIsLastprivate && 886 Lastprivates[OrigVD->getCanonicalDecl()] == 887 OMPC_LASTPRIVATE_conditional) { 888 // Create/init special variable for lastprivate conditionals. 889 Address VDAddr = 890 CGM.getOpenMPRuntime().emitLastprivateConditionalInit( 891 *this, OrigVD); 892 llvm::Value *V = EmitLoadOfScalar( 893 MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(), 894 AlignmentSource::Decl), 895 (*IRef)->getExprLoc()); 896 EmitStoreOfScalar(V, 897 MakeAddrLValue(VDAddr, (*IRef)->getType(), 898 AlignmentSource::Decl)); 899 LocalDeclMap.erase(VD); 900 setAddrOfLocalVar(VD, VDAddr); 901 return VDAddr; 902 } 903 return GetAddrOfLocalVar(VD); 904 }); 905 } 906 assert(IsRegistered && 907 "firstprivate var already registered as private"); 908 // Silence the warning about unused variable. 
909 (void)IsRegistered; 910 } 911 ++IRef; 912 ++InitsRef; 913 } 914 } 915 return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty(); 916 } 917 918 void CodeGenFunction::EmitOMPPrivateClause( 919 const OMPExecutableDirective &D, 920 CodeGenFunction::OMPPrivateScope &PrivateScope) { 921 if (!HaveInsertPoint()) 922 return; 923 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 924 for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) { 925 auto IRef = C->varlist_begin(); 926 for (const Expr *IInit : C->private_copies()) { 927 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 928 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 929 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 930 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() { 931 // Emit private VarDecl with copy init. 932 EmitDecl(*VD); 933 return GetAddrOfLocalVar(VD); 934 }); 935 assert(IsRegistered && "private var already registered as private"); 936 // Silence the warning about unused variable. 937 (void)IsRegistered; 938 } 939 ++IRef; 940 } 941 } 942 } 943 944 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) { 945 if (!HaveInsertPoint()) 946 return false; 947 // threadprivate_var1 = master_threadprivate_var1; 948 // operator=(threadprivate_var2, master_threadprivate_var2); 949 // ... 950 // __kmpc_barrier(&loc, global_tid); 951 llvm::DenseSet<const VarDecl *> CopiedVars; 952 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr; 953 for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) { 954 auto IRef = C->varlist_begin(); 955 auto ISrcRef = C->source_exprs().begin(); 956 auto IDestRef = C->destination_exprs().begin(); 957 for (const Expr *AssignOp : C->assignment_ops()) { 958 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 959 QualType Type = VD->getType(); 960 if (CopiedVars.insert(VD->getCanonicalDecl()).second) { 961 // Get the address of the master variable. If we are emitting code with 962 // TLS support, the address is passed from the master as field in the 963 // captured declaration. 964 Address MasterAddr = Address::invalid(); 965 if (getLangOpts().OpenMPUseTLS && 966 getContext().getTargetInfo().isTLSSupported()) { 967 assert(CapturedStmtInfo->lookup(VD) && 968 "Copyin threadprivates should have been captured!"); 969 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true, 970 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 971 MasterAddr = EmitLValue(&DRE).getAddress(*this); 972 LocalDeclMap.erase(VD); 973 } else { 974 MasterAddr = 975 Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD) 976 : CGM.GetAddrOfGlobal(VD), 977 getContext().getDeclAlign(VD)); 978 } 979 // Get the address of the threadprivate variable. 980 Address PrivateAddr = EmitLValue(*IRef).getAddress(*this); 981 if (CopiedVars.size() == 1) { 982 // At first check if current thread is a master thread. If it is, no 983 // need to copy data. 
984 CopyBegin = createBasicBlock("copyin.not.master"); 985 CopyEnd = createBasicBlock("copyin.not.master.end"); 986 Builder.CreateCondBr( 987 Builder.CreateICmpNE( 988 Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy), 989 Builder.CreatePtrToInt(PrivateAddr.getPointer(), 990 CGM.IntPtrTy)), 991 CopyBegin, CopyEnd); 992 EmitBlock(CopyBegin); 993 } 994 const auto *SrcVD = 995 cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); 996 const auto *DestVD = 997 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 998 EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp); 999 } 1000 ++IRef; 1001 ++ISrcRef; 1002 ++IDestRef; 1003 } 1004 } 1005 if (CopyEnd) { 1006 // Exit out of copying procedure for non-master thread. 1007 EmitBlock(CopyEnd, /*IsFinished=*/true); 1008 return true; 1009 } 1010 return false; 1011 } 1012 1013 bool CodeGenFunction::EmitOMPLastprivateClauseInit( 1014 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) { 1015 if (!HaveInsertPoint()) 1016 return false; 1017 bool HasAtLeastOneLastprivate = false; 1018 llvm::DenseSet<const VarDecl *> SIMDLCVs; 1019 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 1020 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 1021 for (const Expr *C : LoopDirective->counters()) { 1022 SIMDLCVs.insert( 1023 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 1024 } 1025 } 1026 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; 1027 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) { 1028 HasAtLeastOneLastprivate = true; 1029 if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) && 1030 !getLangOpts().OpenMPSimd) 1031 break; 1032 const auto *IRef = C->varlist_begin(); 1033 const auto *IDestRef = C->destination_exprs().begin(); 1034 for (const Expr *IInit : C->private_copies()) { 1035 // Keep the address of the original variable for future update at the end 1036 // of the loop. 1037 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1038 // Taskloops do not require additional initialization, it is done in 1039 // runtime support library. 1040 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) { 1041 const auto *DestVD = 1042 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 1043 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() { 1044 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1045 /*RefersToEnclosingVariableOrCapture=*/ 1046 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1047 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 1048 return EmitLValue(&DRE).getAddress(*this); 1049 }); 1050 // Check if the variable is also a firstprivate: in this case IInit is 1051 // not generated. Initialization of this variable will happen in codegen 1052 // for 'firstprivate' clause. 1053 if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) { 1054 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 1055 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C, 1056 OrigVD]() { 1057 if (C->getKind() == OMPC_LASTPRIVATE_conditional) { 1058 Address VDAddr = 1059 CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this, 1060 OrigVD); 1061 setAddrOfLocalVar(VD, VDAddr); 1062 return VDAddr; 1063 } 1064 // Emit private VarDecl with copy init. 
1065 EmitDecl(*VD); 1066 return GetAddrOfLocalVar(VD); 1067 }); 1068 assert(IsRegistered && 1069 "lastprivate var already registered as private"); 1070 (void)IsRegistered; 1071 } 1072 } 1073 ++IRef; 1074 ++IDestRef; 1075 } 1076 } 1077 return HasAtLeastOneLastprivate; 1078 } 1079 1080 void CodeGenFunction::EmitOMPLastprivateClauseFinal( 1081 const OMPExecutableDirective &D, bool NoFinals, 1082 llvm::Value *IsLastIterCond) { 1083 if (!HaveInsertPoint()) 1084 return; 1085 // Emit following code: 1086 // if (<IsLastIterCond>) { 1087 // orig_var1 = private_orig_var1; 1088 // ... 1089 // orig_varn = private_orig_varn; 1090 // } 1091 llvm::BasicBlock *ThenBB = nullptr; 1092 llvm::BasicBlock *DoneBB = nullptr; 1093 if (IsLastIterCond) { 1094 // Emit implicit barrier if at least one lastprivate conditional is found 1095 // and this is not a simd mode. 1096 if (!getLangOpts().OpenMPSimd && 1097 llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(), 1098 [](const OMPLastprivateClause *C) { 1099 return C->getKind() == OMPC_LASTPRIVATE_conditional; 1100 })) { 1101 CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(), 1102 OMPD_unknown, 1103 /*EmitChecks=*/false, 1104 /*ForceSimpleCall=*/true); 1105 } 1106 ThenBB = createBasicBlock(".omp.lastprivate.then"); 1107 DoneBB = createBasicBlock(".omp.lastprivate.done"); 1108 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB); 1109 EmitBlock(ThenBB); 1110 } 1111 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; 1112 llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates; 1113 if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) { 1114 auto IC = LoopDirective->counters().begin(); 1115 for (const Expr *F : LoopDirective->finals()) { 1116 const auto *D = 1117 cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl(); 1118 if (NoFinals) 1119 AlreadyEmittedVars.insert(D); 1120 else 1121 LoopCountersAndUpdates[D] = F; 1122 ++IC; 1123 } 1124 } 1125 for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) { 1126 auto IRef = C->varlist_begin(); 1127 auto ISrcRef = C->source_exprs().begin(); 1128 auto IDestRef = C->destination_exprs().begin(); 1129 for (const Expr *AssignOp : C->assignment_ops()) { 1130 const auto *PrivateVD = 1131 cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1132 QualType Type = PrivateVD->getType(); 1133 const auto *CanonicalVD = PrivateVD->getCanonicalDecl(); 1134 if (AlreadyEmittedVars.insert(CanonicalVD).second) { 1135 // If lastprivate variable is a loop control variable for loop-based 1136 // directive, update its value before copyin back to original 1137 // variable. 1138 if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) 1139 EmitIgnoredExpr(FinalExpr); 1140 const auto *SrcVD = 1141 cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); 1142 const auto *DestVD = 1143 cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 1144 // Get the address of the private variable. 1145 Address PrivateAddr = GetAddrOfLocalVar(PrivateVD); 1146 if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>()) 1147 PrivateAddr = 1148 Address(Builder.CreateLoad(PrivateAddr), 1149 CGM.getNaturalTypeAlignment(RefTy->getPointeeType())); 1150 // Store the last value to the private copy in the last iteration. 1151 if (C->getKind() == OMPC_LASTPRIVATE_conditional) 1152 CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate( 1153 *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD, 1154 (*IRef)->getExprLoc()); 1155 // Get the address of the original variable. 
1156 Address OriginalAddr = GetAddrOfLocalVar(DestVD); 1157 EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp); 1158 } 1159 ++IRef; 1160 ++ISrcRef; 1161 ++IDestRef; 1162 } 1163 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1164 EmitIgnoredExpr(PostUpdate); 1165 } 1166 if (IsLastIterCond) 1167 EmitBlock(DoneBB, /*IsFinished=*/true); 1168 } 1169 1170 void CodeGenFunction::EmitOMPReductionClauseInit( 1171 const OMPExecutableDirective &D, 1172 CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) { 1173 if (!HaveInsertPoint()) 1174 return; 1175 SmallVector<const Expr *, 4> Shareds; 1176 SmallVector<const Expr *, 4> Privates; 1177 SmallVector<const Expr *, 4> ReductionOps; 1178 SmallVector<const Expr *, 4> LHSs; 1179 SmallVector<const Expr *, 4> RHSs; 1180 OMPTaskDataTy Data; 1181 SmallVector<const Expr *, 4> TaskLHSs; 1182 SmallVector<const Expr *, 4> TaskRHSs; 1183 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) { 1184 if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan)) 1185 continue; 1186 Shareds.append(C->varlist_begin(), C->varlist_end()); 1187 Privates.append(C->privates().begin(), C->privates().end()); 1188 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 1189 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 1190 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 1191 if (C->getModifier() == OMPC_REDUCTION_task) { 1192 Data.ReductionVars.append(C->privates().begin(), C->privates().end()); 1193 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 1194 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 1195 Data.ReductionOps.append(C->reduction_ops().begin(), 1196 C->reduction_ops().end()); 1197 TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 1198 TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 1199 } 1200 } 1201 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps); 1202 unsigned Count = 0; 1203 auto *ILHS = LHSs.begin(); 1204 auto *IRHS = RHSs.begin(); 1205 auto *IPriv = Privates.begin(); 1206 for (const Expr *IRef : Shareds) { 1207 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl()); 1208 // Emit private VarDecl with reduction init. 1209 RedCG.emitSharedOrigLValue(*this, Count); 1210 RedCG.emitAggregateType(*this, Count); 1211 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD); 1212 RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(), 1213 RedCG.getSharedLValue(Count), 1214 [&Emission](CodeGenFunction &CGF) { 1215 CGF.EmitAutoVarInit(Emission); 1216 return true; 1217 }); 1218 EmitAutoVarCleanups(Emission); 1219 Address BaseAddr = RedCG.adjustPrivateAddress( 1220 *this, Count, Emission.getAllocatedAddress()); 1221 bool IsRegistered = PrivateScope.addPrivate( 1222 RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; }); 1223 assert(IsRegistered && "private var already registered as private"); 1224 // Silence the warning about unused variable. 1225 (void)IsRegistered; 1226 1227 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 1228 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 1229 QualType Type = PrivateVD->getType(); 1230 bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef); 1231 if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) { 1232 // Store the address of the original variable associated with the LHS 1233 // implicit variable. 
1234 PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() { 1235 return RedCG.getSharedLValue(Count).getAddress(*this); 1236 }); 1237 PrivateScope.addPrivate( 1238 RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); }); 1239 } else if ((isaOMPArraySectionExpr && Type->isScalarType()) || 1240 isa<ArraySubscriptExpr>(IRef)) { 1241 // Store the address of the original variable associated with the LHS 1242 // implicit variable. 1243 PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() { 1244 return RedCG.getSharedLValue(Count).getAddress(*this); 1245 }); 1246 PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() { 1247 return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD), 1248 ConvertTypeForMem(RHSVD->getType()), 1249 "rhs.begin"); 1250 }); 1251 } else { 1252 QualType Type = PrivateVD->getType(); 1253 bool IsArray = getContext().getAsArrayType(Type) != nullptr; 1254 Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this); 1255 // Store the address of the original variable associated with the LHS 1256 // implicit variable. 1257 if (IsArray) { 1258 OriginalAddr = Builder.CreateElementBitCast( 1259 OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin"); 1260 } 1261 PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; }); 1262 PrivateScope.addPrivate( 1263 RHSVD, [this, PrivateVD, RHSVD, IsArray]() { 1264 return IsArray 1265 ? Builder.CreateElementBitCast( 1266 GetAddrOfLocalVar(PrivateVD), 1267 ConvertTypeForMem(RHSVD->getType()), "rhs.begin") 1268 : GetAddrOfLocalVar(PrivateVD); 1269 }); 1270 } 1271 ++ILHS; 1272 ++IRHS; 1273 ++IPriv; 1274 ++Count; 1275 } 1276 if (!Data.ReductionVars.empty()) { 1277 Data.IsReductionWithTaskMod = true; 1278 Data.IsWorksharingReduction = 1279 isOpenMPWorksharingDirective(D.getDirectiveKind()); 1280 llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit( 1281 *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data); 1282 const Expr *TaskRedRef = nullptr; 1283 switch (D.getDirectiveKind()) { 1284 case OMPD_parallel: 1285 TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr(); 1286 break; 1287 case OMPD_for: 1288 TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr(); 1289 break; 1290 case OMPD_sections: 1291 TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr(); 1292 break; 1293 case OMPD_parallel_for: 1294 TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr(); 1295 break; 1296 case OMPD_parallel_master: 1297 TaskRedRef = 1298 cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr(); 1299 break; 1300 case OMPD_parallel_sections: 1301 TaskRedRef = 1302 cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr(); 1303 break; 1304 case OMPD_target_parallel: 1305 TaskRedRef = 1306 cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr(); 1307 break; 1308 case OMPD_target_parallel_for: 1309 TaskRedRef = 1310 cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr(); 1311 break; 1312 case OMPD_distribute_parallel_for: 1313 TaskRedRef = 1314 cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr(); 1315 break; 1316 case OMPD_teams_distribute_parallel_for: 1317 TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D) 1318 .getTaskReductionRefExpr(); 1319 break; 1320 case OMPD_target_teams_distribute_parallel_for: 1321 TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D) 1322 .getTaskReductionRefExpr(); 1323 break; 1324 case OMPD_simd: 1325 case OMPD_for_simd: 1326 case OMPD_section: 
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function.
This is necessary for combined constructs such as
1457 /// 'distribute parallel for'.
1458 typedef llvm::function_ref<void(CodeGenFunction &,
1459 const OMPExecutableDirective &,
1460 llvm::SmallVectorImpl<llvm::Value *> &)>
1461 CodeGenBoundParametersTy;
1462 } // anonymous namespace
1463
1464 static void
1465 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
1466 const OMPExecutableDirective &S) {
1467 if (CGF.getLangOpts().OpenMP < 50)
1468 return;
1469 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
1470 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1471 for (const Expr *Ref : C->varlists()) {
1472 if (!Ref->getType()->isScalarType())
1473 continue;
1474 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1475 if (!DRE)
1476 continue;
1477 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1478 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1479 }
1480 }
1481 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1482 for (const Expr *Ref : C->varlists()) {
1483 if (!Ref->getType()->isScalarType())
1484 continue;
1485 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1486 if (!DRE)
1487 continue;
1488 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1489 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1490 }
1491 }
1492 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1493 for (const Expr *Ref : C->varlists()) {
1494 if (!Ref->getType()->isScalarType())
1495 continue;
1496 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1497 if (!DRE)
1498 continue;
1499 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1500 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1501 }
1502 }
1503 // Privates should not be analyzed since they are not captured at all.
1504 // Task reductions may be skipped - tasks are ignored.
1505 // Firstprivates do not return a value but may be passed by reference - no
1506 // need to check for updated lastprivate conditional.
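// For illustration, given a hypothetical combination like
//   #pragma omp parallel for firstprivate(a) lastprivate(conditional : x)
// 'a' below is only recorded as private, while 'x' was already registered
// above for conditional lastprivate tracking.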
1507 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1508 for (const Expr *Ref : C->varlists()) { 1509 if (!Ref->getType()->isScalarType()) 1510 continue; 1511 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1512 if (!DRE) 1513 continue; 1514 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1515 } 1516 } 1517 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1518 CGF, S, PrivateDecls); 1519 } 1520 1521 static void emitCommonOMPParallelDirective( 1522 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1523 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1524 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1525 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1526 llvm::Function *OutlinedFn = 1527 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1528 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1529 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1530 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1531 llvm::Value *NumThreads = 1532 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1533 /*IgnoreResultAssign=*/true); 1534 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1535 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1536 } 1537 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1538 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1539 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1540 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1541 } 1542 const Expr *IfCond = nullptr; 1543 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1544 if (C->getNameModifier() == OMPD_unknown || 1545 C->getNameModifier() == OMPD_parallel) { 1546 IfCond = C->getCondition(); 1547 break; 1548 } 1549 } 1550 1551 OMPParallelScope Scope(CGF, S); 1552 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1553 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk 1554 // lower and upper bounds with the pragma 'for' chunking mechanism. 1555 // The following lambda takes care of appending the lower and upper bound 1556 // parameters when necessary 1557 CodeGenBoundParameters(CGF, S, CapturedVars); 1558 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1559 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1560 CapturedVars, IfCond); 1561 } 1562 1563 static void emitEmptyBoundParameters(CodeGenFunction &, 1564 const OMPExecutableDirective &, 1565 llvm::SmallVectorImpl<llvm::Value *> &) {} 1566 1567 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 1568 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 1569 // Check if we have any if clause associated with the directive. 
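// For illustration, with a hypothetical directive such as
//   #pragma omp parallel if(n > 1) num_threads(4)
// the 'if' and 'num_threads' expressions are evaluated here and passed on
// to the OpenMPIRBuilder below.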
1570 llvm::Value *IfCond = nullptr;
1571 if (const auto *C = S.getSingleClause<OMPIfClause>())
1572 IfCond = EmitScalarExpr(C->getCondition(),
1573 /*IgnoreResultAssign=*/true);
1574
1575 llvm::Value *NumThreads = nullptr;
1576 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1577 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1578 /*IgnoreResultAssign=*/true);
1579
1580 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1581 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1582 ProcBind = ProcBindClause->getProcBindKind();
1583
1584 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1585
1586 // The cleanup callback that finalizes all variables at the given location,
1587 // thus calls destructors etc.
1588 auto FiniCB = [this](InsertPointTy IP) {
1589 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1590 };
1591
1592 // Privatization callback that performs appropriate action for
1593 // shared/private/firstprivate/lastprivate/copyin/... variables.
1594 //
1595 // TODO: This defaults to shared right now.
1596 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1597 llvm::Value &Val, llvm::Value *&ReplVal) {
1598 // The next line is appropriate only for variables (Val) with the
1599 // data-sharing attribute "shared".
1600 ReplVal = &Val;
1601
1602 return CodeGenIP;
1603 };
1604
1605 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1606 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1607
1608 auto BodyGenCB = [ParallelRegionBodyStmt,
1609 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1610 llvm::BasicBlock &ContinuationBB) {
1611 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
1612 ContinuationBB);
1613 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
1614 CodeGenIP, ContinuationBB);
1615 };
1616
1617 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1618 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1619 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
1620 FiniCB, IfCond, NumThreads,
1621 ProcBind, S.hasCancel()));
1622 return;
1623 }
1624
1625 // Emit parallel region as a standalone region.
1626 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1627 Action.Enter(CGF);
1628 OMPPrivateScope PrivateScope(CGF);
1629 bool Copyins = CGF.EmitOMPCopyinClause(S);
1630 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1631 if (Copyins) {
1632 // Emit implicit barrier to synchronize threads and avoid data races on
1633 // propagation of the master thread's values of threadprivate variables to
1634 // local instances of those variables in all other implicit threads.
1635 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1636 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1637 /*ForceSimpleCall=*/true);
1638 }
1639 CGF.EmitOMPPrivateClause(S, PrivateScope);
1640 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1641 (void)PrivateScope.Privatize();
1642 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1643 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1644 };
1645 {
1646 auto LPCRegion =
1647 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1648 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1649 emitEmptyBoundParameters);
1650 emitPostUpdateForReductionClause(*this, S,
1651 [](CodeGenFunction &) { return nullptr; });
1652 }
1653 // Check for outer lastprivate conditional update.
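// For illustration, if a hypothetical enclosing construct declared
//   lastprivate(conditional : x)
// and 'x' was privatized in this region, the shared tracking state still
// has to be checked and updated here.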
1654 checkForLastprivateConditionalUpdate(*this, S);
1655 }
1656
1657 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1658 int MaxLevel, int Level = 0) {
1659 assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1660 const Stmt *SimplifiedS = S->IgnoreContainers();
1661 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1662 PrettyStackTraceLoc CrashInfo(
1663 CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1664 "LLVM IR generation of compound statement ('{}')");
1665
1666 // Keep track of the current cleanup stack depth, including debug scopes.
1667 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
1668 for (const Stmt *CurStmt : CS->body())
1669 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
1670 return;
1671 }
1672 if (SimplifiedS == NextLoop) {
1673 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
1674 S = For->getBody();
1675 } else {
1676 assert(isa<CXXForRangeStmt>(SimplifiedS) &&
1677 "Expected canonical for loop or range-based for loop.");
1678 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
1679 CGF.EmitStmt(CXXFor->getLoopVarStmt());
1680 S = CXXFor->getBody();
1681 }
1682 if (Level + 1 < MaxLevel) {
1683 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
1684 S, /*TryImperfectlyNestedLoops=*/true);
1685 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
1686 return;
1687 }
1688 }
1689 CGF.EmitStmt(S);
1690 }
1691
1692 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1693 JumpDest LoopExit) {
1694 RunCleanupsScope BodyScope(*this);
1695 // Update counter values on the current iteration.
1696 for (const Expr *UE : D.updates())
1697 EmitIgnoredExpr(UE);
1698 // Update the linear variables.
1699 // In distribute directives only loop counters may be marked as linear, no
1700 // need to generate the code for them.
1701 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1702 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1703 for (const Expr *UE : C->updates())
1704 EmitIgnoredExpr(UE);
1705 }
1706 }
1707
1708 // On a continue in the body, jump to the end.
1709 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1710 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1711 for (const Expr *E : D.finals_conditions()) {
1712 if (!E)
1713 continue;
1714 // Check that the loop counter in a non-rectangular nest fits into the
1715 // iteration space.
1716 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
1717 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
1718 getProfileCount(D.getBody()));
1719 EmitBlock(NextBB);
1720 }
1721
1722 OMPPrivateScope InscanScope(*this);
1723 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
1724 bool IsInscanRegion = InscanScope.Privatize();
1725 if (IsInscanRegion) {
1726 // Need to remember the blocks before and after the scan directive
1727 // to dispatch them correctly depending on the clause used in
1728 // this directive, inclusive or exclusive. For inclusive scan the natural
1729 // order of the blocks is used, for the exclusive clause the blocks must be
1730 // executed in reverse order.
1731 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
1732 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
1733 // No need to allocate the inscan exit block; in simd mode it is selected
1734 // in the codegen for the scan directive.
1735 if (D.getDirectiveKind() != OMPD_simd && 1736 (!getLangOpts().OpenMPSimd || 1737 isOpenMPSimdDirective(D.getDirectiveKind()))) { 1738 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1739 } 1740 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1741 EmitBranch(OMPScanDispatch); 1742 EmitBlock(OMPBeforeScanBlock); 1743 } 1744 1745 // Emit loop variables for C++ range loops. 1746 const Stmt *Body = 1747 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1748 // Emit loop body. 1749 emitBody(*this, Body, 1750 OMPLoopDirective::tryToFindNextInnerLoop( 1751 Body, /*TryImperfectlyNestedLoops=*/true), 1752 D.getCollapsedNumber()); 1753 1754 // Jump to the dispatcher at the end of the loop body. 1755 if (IsInscanRegion) 1756 EmitBranch(OMPScanExitBlock); 1757 1758 // The end (updates/cleanups). 1759 EmitBlock(Continue.getBlock()); 1760 BreakContinueStack.pop_back(); 1761 } 1762 1763 void CodeGenFunction::EmitOMPInnerLoop( 1764 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 1765 const Expr *IncExpr, 1766 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 1767 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 1768 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 1769 1770 // Start the loop with a block that tests the condition. 1771 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1772 EmitBlock(CondBlock); 1773 const SourceRange R = S.getSourceRange(); 1774 1775 // If attributes are attached, push to the basic block with them. 1776 const auto &OMPED = cast<OMPExecutableDirective>(S); 1777 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 1778 const Stmt *SS = ICS->getCapturedStmt(); 1779 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 1780 if (AS) 1781 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 1782 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 1783 SourceLocToDebugLoc(R.getEnd())); 1784 else 1785 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1786 SourceLocToDebugLoc(R.getEnd())); 1787 1788 // If there are any cleanups between here and the loop-exit scope, 1789 // create a block to stage a loop exit along. 1790 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1791 if (RequiresCleanup) 1792 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1793 1794 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1795 1796 // Emit condition. 1797 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1798 if (ExitBlock != LoopExit.getBlock()) { 1799 EmitBlock(ExitBlock); 1800 EmitBranchThroughCleanup(LoopExit); 1801 } 1802 1803 EmitBlock(LoopBody); 1804 incrementProfileCounter(&S); 1805 1806 // Create a block for the increment. 1807 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1808 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1809 1810 BodyGen(*this); 1811 1812 // Emit "IV = IV + 1" and a back-edge to the condition block. 1813 EmitBlock(Continue.getBlock()); 1814 EmitIgnoredExpr(IncExpr); 1815 PostIncGen(*this); 1816 BreakContinueStack.pop_back(); 1817 EmitBranch(CondBlock); 1818 LoopStack.pop(); 1819 // Emit the fall-through block. 1820 EmitBlock(LoopExit.getBlock()); 1821 } 1822 1823 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1824 if (!HaveInsertPoint()) 1825 return false; 1826 // Emit inits for the linear variables. 
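// For illustration, a hypothetical clause like
//   #pragma omp simd linear(j : 2)
// emits a private copy of 'j' here and, below, a helper variable holding
// the pre-calculated step when the step is not a compile-time constant.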
1827 bool HasLinears = false; 1828 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1829 for (const Expr *Init : C->inits()) { 1830 HasLinears = true; 1831 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1832 if (const auto *Ref = 1833 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1834 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1835 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1836 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1837 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1838 VD->getInit()->getType(), VK_LValue, 1839 VD->getInit()->getExprLoc()); 1840 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1841 VD->getType()), 1842 /*capturedByInit=*/false); 1843 EmitAutoVarCleanups(Emission); 1844 } else { 1845 EmitVarDecl(*VD); 1846 } 1847 } 1848 // Emit the linear steps for the linear clauses. 1849 // If a step is not constant, it is pre-calculated before the loop. 1850 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1851 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1852 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1853 // Emit calculation of the linear step. 1854 EmitIgnoredExpr(CS); 1855 } 1856 } 1857 return HasLinears; 1858 } 1859 1860 void CodeGenFunction::EmitOMPLinearClauseFinal( 1861 const OMPLoopDirective &D, 1862 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1863 if (!HaveInsertPoint()) 1864 return; 1865 llvm::BasicBlock *DoneBB = nullptr; 1866 // Emit the final values of the linear variables. 1867 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1868 auto IC = C->varlist_begin(); 1869 for (const Expr *F : C->finals()) { 1870 if (!DoneBB) { 1871 if (llvm::Value *Cond = CondGen(*this)) { 1872 // If the first post-update expression is found, emit conditional 1873 // block if it was requested. 
1874 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1875 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1876 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1877 EmitBlock(ThenBB); 1878 } 1879 } 1880 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1881 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1882 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1883 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1884 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1885 CodeGenFunction::OMPPrivateScope VarScope(*this); 1886 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1887 (void)VarScope.Privatize(); 1888 EmitIgnoredExpr(F); 1889 ++IC; 1890 } 1891 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1892 EmitIgnoredExpr(PostUpdate); 1893 } 1894 if (DoneBB) 1895 EmitBlock(DoneBB, /*IsFinished=*/true); 1896 } 1897 1898 static void emitAlignedClause(CodeGenFunction &CGF, 1899 const OMPExecutableDirective &D) { 1900 if (!CGF.HaveInsertPoint()) 1901 return; 1902 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1903 llvm::APInt ClauseAlignment(64, 0); 1904 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1905 auto *AlignmentCI = 1906 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1907 ClauseAlignment = AlignmentCI->getValue(); 1908 } 1909 for (const Expr *E : Clause->varlists()) { 1910 llvm::APInt Alignment(ClauseAlignment); 1911 if (Alignment == 0) { 1912 // OpenMP [2.8.1, Description] 1913 // If no optional parameter is specified, implementation-defined default 1914 // alignments for SIMD instructions on the target platforms are assumed. 1915 Alignment = 1916 CGF.getContext() 1917 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1918 E->getType()->getPointeeType())) 1919 .getQuantity(); 1920 } 1921 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1922 "alignment is not power of 2"); 1923 if (Alignment != 0) { 1924 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1925 CGF.emitAlignmentAssumption( 1926 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1927 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1928 } 1929 } 1930 } 1931 } 1932 1933 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1934 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1935 if (!HaveInsertPoint()) 1936 return; 1937 auto I = S.private_counters().begin(); 1938 for (const Expr *E : S.counters()) { 1939 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1940 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1941 // Emit var without initialization. 1942 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1943 EmitAutoVarCleanups(VarEmission); 1944 LocalDeclMap.erase(PrivateVD); 1945 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1946 return VarEmission.getAllocatedAddress(); 1947 }); 1948 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1949 VD->hasGlobalStorage()) { 1950 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1951 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1952 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1953 E->getType(), VK_LValue, E->getExprLoc()); 1954 return EmitLValue(&DRE).getAddress(*this); 1955 }); 1956 } else { 1957 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1958 return VarEmission.getAllocatedAddress(); 1959 }); 1960 } 1961 ++I; 1962 } 1963 // Privatize extra loop counters used in loops for ordered(n) clauses. 
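// For illustration, in a hypothetical nest such as
//   #pragma omp for ordered(2) collapse(1)
//   for (i ...) for (j ...) ...
// only 'i' is an associated loop counter, so the counter 'j' named by
// ordered(2) is privatized here as well.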
1964 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 1965 if (!C->getNumForLoops()) 1966 continue; 1967 for (unsigned I = S.getCollapsedNumber(), 1968 E = C->getLoopNumIterations().size(); 1969 I < E; ++I) { 1970 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 1971 const auto *VD = cast<VarDecl>(DRE->getDecl()); 1972 // Override only those variables that can be captured to avoid re-emission 1973 // of the variables declared within the loops. 1974 if (DRE->refersToEnclosingVariableOrCapture()) { 1975 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 1976 return CreateMemTemp(DRE->getType(), VD->getName()); 1977 }); 1978 } 1979 } 1980 } 1981 } 1982 1983 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 1984 const Expr *Cond, llvm::BasicBlock *TrueBlock, 1985 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 1986 if (!CGF.HaveInsertPoint()) 1987 return; 1988 { 1989 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 1990 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 1991 (void)PreCondScope.Privatize(); 1992 // Get initial values of real counters. 1993 for (const Expr *I : S.inits()) { 1994 CGF.EmitIgnoredExpr(I); 1995 } 1996 } 1997 // Create temp loop control variables with their init values to support 1998 // non-rectangular loops. 1999 CodeGenFunction::OMPMapVars PreCondVars; 2000 for (const Expr * E: S.dependent_counters()) { 2001 if (!E) 2002 continue; 2003 assert(!E->getType().getNonReferenceType()->isRecordType() && 2004 "dependent counter must not be an iterator."); 2005 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2006 Address CounterAddr = 2007 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2008 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2009 } 2010 (void)PreCondVars.apply(CGF); 2011 for (const Expr *E : S.dependent_inits()) { 2012 if (!E) 2013 continue; 2014 CGF.EmitIgnoredExpr(E); 2015 } 2016 // Check that loop is executed at least one time. 2017 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2018 PreCondVars.restore(CGF); 2019 } 2020 2021 void CodeGenFunction::EmitOMPLinearClause( 2022 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2023 if (!HaveInsertPoint()) 2024 return; 2025 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2026 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2027 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2028 for (const Expr *C : LoopDirective->counters()) { 2029 SIMDLCVs.insert( 2030 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2031 } 2032 } 2033 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2034 auto CurPrivate = C->privates().begin(); 2035 for (const Expr *E : C->varlists()) { 2036 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2037 const auto *PrivateVD = 2038 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2039 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2040 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 2041 // Emit private VarDecl with copy init. 2042 EmitVarDecl(*PrivateVD); 2043 return GetAddrOfLocalVar(PrivateVD); 2044 }); 2045 assert(IsRegistered && "linear var already registered as private"); 2046 // Silence the warning about unused variable. 
2047 (void)IsRegistered; 2048 } else { 2049 EmitVarDecl(*PrivateVD); 2050 } 2051 ++CurPrivate; 2052 } 2053 } 2054 } 2055 2056 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2057 const OMPExecutableDirective &D, 2058 bool IsMonotonic) { 2059 if (!CGF.HaveInsertPoint()) 2060 return; 2061 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2062 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2063 /*ignoreResult=*/true); 2064 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2065 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2066 // In presence of finite 'safelen', it may be unsafe to mark all 2067 // the memory instructions parallel, because loop-carried 2068 // dependences of 'safelen' iterations are possible. 2069 if (!IsMonotonic) 2070 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2071 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2072 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2073 /*ignoreResult=*/true); 2074 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2075 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2076 // In presence of finite 'safelen', it may be unsafe to mark all 2077 // the memory instructions parallel, because loop-carried 2078 // dependences of 'safelen' iterations are possible. 2079 CGF.LoopStack.setParallel(/*Enable=*/false); 2080 } 2081 } 2082 2083 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 2084 bool IsMonotonic) { 2085 // Walk clauses and process safelen/lastprivate. 2086 LoopStack.setParallel(!IsMonotonic); 2087 LoopStack.setVectorizeEnable(); 2088 emitSimdlenSafelenClause(*this, D, IsMonotonic); 2089 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2090 if (C->getKind() == OMPC_ORDER_concurrent) 2091 LoopStack.setParallel(/*Enable=*/true); 2092 if ((D.getDirectiveKind() == OMPD_simd || 2093 (getLangOpts().OpenMPSimd && 2094 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2095 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2096 [](const OMPReductionClause *C) { 2097 return C->getModifier() == OMPC_REDUCTION_inscan; 2098 })) 2099 // Disable parallel access in case of prefix sum. 2100 LoopStack.setParallel(/*Enable=*/false); 2101 } 2102 2103 void CodeGenFunction::EmitOMPSimdFinal( 2104 const OMPLoopDirective &D, 2105 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2106 if (!HaveInsertPoint()) 2107 return; 2108 llvm::BasicBlock *DoneBB = nullptr; 2109 auto IC = D.counters().begin(); 2110 auto IPC = D.private_counters().begin(); 2111 for (const Expr *F : D.finals()) { 2112 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 2113 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 2114 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 2115 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 2116 OrigVD->hasGlobalStorage() || CED) { 2117 if (!DoneBB) { 2118 if (llvm::Value *Cond = CondGen(*this)) { 2119 // If the first post-update expression is found, emit conditional 2120 // block if it was requested. 
2121 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 2122 DoneBB = createBasicBlock(".omp.final.done"); 2123 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2124 EmitBlock(ThenBB); 2125 } 2126 } 2127 Address OrigAddr = Address::invalid(); 2128 if (CED) { 2129 OrigAddr = 2130 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 2131 } else { 2132 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 2133 /*RefersToEnclosingVariableOrCapture=*/false, 2134 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 2135 OrigAddr = EmitLValue(&DRE).getAddress(*this); 2136 } 2137 OMPPrivateScope VarScope(*this); 2138 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2139 (void)VarScope.Privatize(); 2140 EmitIgnoredExpr(F); 2141 } 2142 ++IC; 2143 ++IPC; 2144 } 2145 if (DoneBB) 2146 EmitBlock(DoneBB, /*IsFinished=*/true); 2147 } 2148 2149 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 2150 const OMPLoopDirective &S, 2151 CodeGenFunction::JumpDest LoopExit) { 2152 CGF.EmitOMPLoopBody(S, LoopExit); 2153 CGF.EmitStopPoint(&S); 2154 } 2155 2156 /// Emit a helper variable and return corresponding lvalue. 2157 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2158 const DeclRefExpr *Helper) { 2159 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2160 CGF.EmitVarDecl(*VDecl); 2161 return CGF.EmitLValue(Helper); 2162 } 2163 2164 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2165 const RegionCodeGenTy &SimdInitGen, 2166 const RegionCodeGenTy &BodyCodeGen) { 2167 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2168 PrePostActionTy &) { 2169 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2170 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2171 SimdInitGen(CGF); 2172 2173 BodyCodeGen(CGF); 2174 }; 2175 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2176 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2177 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2178 2179 BodyCodeGen(CGF); 2180 }; 2181 const Expr *IfCond = nullptr; 2182 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2183 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2184 if (CGF.getLangOpts().OpenMP >= 50 && 2185 (C->getNameModifier() == OMPD_unknown || 2186 C->getNameModifier() == OMPD_simd)) { 2187 IfCond = C->getCondition(); 2188 break; 2189 } 2190 } 2191 } 2192 if (IfCond) { 2193 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2194 } else { 2195 RegionCodeGenTy ThenRCG(ThenGen); 2196 ThenRCG(CGF); 2197 } 2198 } 2199 2200 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2201 PrePostActionTy &Action) { 2202 Action.Enter(CGF); 2203 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2204 "Expected simd directive"); 2205 OMPLoopScope PreInitScope(CGF, S); 2206 // if (PreCond) { 2207 // for (IV in 0..LastIteration) BODY; 2208 // <Final counter/linear vars updates>; 2209 // } 2210 // 2211 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2212 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2213 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2214 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2215 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2216 } 2217 2218 // Emit: if (PreCond) - begin. 2219 // If the condition constant folds and can be elided, avoid emitting the 2220 // whole loop. 
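// For illustration, for a hypothetical zero-trip loop like
//   #pragma omp simd
//   for (int i = 0; i < 0; ++i) ...
// the precondition folds to false and the entire region is skipped.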
2221 bool CondConstant; 2222 llvm::BasicBlock *ContBlock = nullptr; 2223 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2224 if (!CondConstant) 2225 return; 2226 } else { 2227 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2228 ContBlock = CGF.createBasicBlock("simd.if.end"); 2229 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2230 CGF.getProfileCount(&S)); 2231 CGF.EmitBlock(ThenBlock); 2232 CGF.incrementProfileCounter(&S); 2233 } 2234 2235 // Emit the loop iteration variable. 2236 const Expr *IVExpr = S.getIterationVariable(); 2237 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2238 CGF.EmitVarDecl(*IVDecl); 2239 CGF.EmitIgnoredExpr(S.getInit()); 2240 2241 // Emit the iterations count variable. 2242 // If it is not a variable, Sema decided to calculate iterations count on 2243 // each iteration (e.g., it is foldable into a constant). 2244 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2245 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2246 // Emit calculation of the iterations count. 2247 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2248 } 2249 2250 emitAlignedClause(CGF, S); 2251 (void)CGF.EmitOMPLinearClauseInit(S); 2252 { 2253 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2254 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2255 CGF.EmitOMPLinearClause(S, LoopScope); 2256 CGF.EmitOMPPrivateClause(S, LoopScope); 2257 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2258 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2259 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2260 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2261 (void)LoopScope.Privatize(); 2262 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2263 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2264 2265 emitCommonSimdLoop( 2266 CGF, S, 2267 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2268 CGF.EmitOMPSimdInit(S); 2269 }, 2270 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2271 CGF.EmitOMPInnerLoop( 2272 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2273 [&S](CodeGenFunction &CGF) { 2274 emitOMPLoopBodyWithStopPoint(CGF, S, 2275 CodeGenFunction::JumpDest()); 2276 }, 2277 [](CodeGenFunction &) {}); 2278 }); 2279 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2280 // Emit final copy of the lastprivate variables at the end of loops. 2281 if (HasLastprivateClause) 2282 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2283 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2284 emitPostUpdateForReductionClause(CGF, S, 2285 [](CodeGenFunction &) { return nullptr; }); 2286 } 2287 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2288 // Emit: if (PreCond) - end. 2289 if (ContBlock) { 2290 CGF.EmitBranch(ContBlock); 2291 CGF.EmitBlock(ContBlock, true); 2292 } 2293 } 2294 2295 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2296 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2297 OMPFirstScanLoop = true; 2298 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2299 emitOMPSimdRegion(CGF, S, Action); 2300 }; 2301 { 2302 auto LPCRegion = 2303 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2304 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2305 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2306 } 2307 // Check for outer lastprivate conditional update. 
2308 checkForLastprivateConditionalUpdate(*this, S); 2309 } 2310 2311 void CodeGenFunction::EmitOMPOuterLoop( 2312 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2313 CodeGenFunction::OMPPrivateScope &LoopScope, 2314 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2315 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2316 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2317 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2318 2319 const Expr *IVExpr = S.getIterationVariable(); 2320 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2321 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2322 2323 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2324 2325 // Start the loop with a block that tests the condition. 2326 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2327 EmitBlock(CondBlock); 2328 const SourceRange R = S.getSourceRange(); 2329 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2330 SourceLocToDebugLoc(R.getEnd())); 2331 2332 llvm::Value *BoolCondVal = nullptr; 2333 if (!DynamicOrOrdered) { 2334 // UB = min(UB, GlobalUB) or 2335 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2336 // 'distribute parallel for') 2337 EmitIgnoredExpr(LoopArgs.EUB); 2338 // IV = LB 2339 EmitIgnoredExpr(LoopArgs.Init); 2340 // IV < UB 2341 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2342 } else { 2343 BoolCondVal = 2344 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2345 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2346 } 2347 2348 // If there are any cleanups between here and the loop-exit scope, 2349 // create a block to stage a loop exit along. 2350 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2351 if (LoopScope.requiresCleanups()) 2352 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2353 2354 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2355 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2356 if (ExitBlock != LoopExit.getBlock()) { 2357 EmitBlock(ExitBlock); 2358 EmitBranchThroughCleanup(LoopExit); 2359 } 2360 EmitBlock(LoopBody); 2361 2362 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2363 // LB for loop condition and emitted it above). 2364 if (DynamicOrOrdered) 2365 EmitIgnoredExpr(LoopArgs.Init); 2366 2367 // Create a block for the increment. 2368 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2369 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2370 2371 emitCommonSimdLoop( 2372 *this, S, 2373 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2374 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2375 // with dynamic/guided scheduling and without ordered clause. 2376 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2377 CGF.LoopStack.setParallel(!IsMonotonic); 2378 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2379 if (C->getKind() == OMPC_ORDER_concurrent) 2380 CGF.LoopStack.setParallel(/*Enable=*/true); 2381 } else { 2382 CGF.EmitOMPSimdInit(S, IsMonotonic); 2383 } 2384 }, 2385 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2386 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2387 SourceLocation Loc = S.getBeginLoc(); 2388 // when 'distribute' is not combined with a 'for': 2389 // while (idx <= UB) { BODY; ++idx; } 2390 // when 'distribute' is combined with a 'for' 2391 // (e.g. 
'distribute parallel for') 2392 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2393 CGF.EmitOMPInnerLoop( 2394 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2395 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2396 CodeGenLoop(CGF, S, LoopExit); 2397 }, 2398 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2399 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2400 }); 2401 }); 2402 2403 EmitBlock(Continue.getBlock()); 2404 BreakContinueStack.pop_back(); 2405 if (!DynamicOrOrdered) { 2406 // Emit "LB = LB + Stride", "UB = UB + Stride". 2407 EmitIgnoredExpr(LoopArgs.NextLB); 2408 EmitIgnoredExpr(LoopArgs.NextUB); 2409 } 2410 2411 EmitBranch(CondBlock); 2412 LoopStack.pop(); 2413 // Emit the fall-through block. 2414 EmitBlock(LoopExit.getBlock()); 2415 2416 // Tell the runtime we are done. 2417 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2418 if (!DynamicOrOrdered) 2419 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2420 S.getDirectiveKind()); 2421 }; 2422 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2423 } 2424 2425 void CodeGenFunction::EmitOMPForOuterLoop( 2426 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2427 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2428 const OMPLoopArguments &LoopArgs, 2429 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2430 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2431 2432 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2433 const bool DynamicOrOrdered = 2434 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2435 2436 assert((Ordered || 2437 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2438 LoopArgs.Chunk != nullptr)) && 2439 "static non-chunked schedule does not need outer loop"); 2440 2441 // Emit outer loop. 2442 // 2443 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2444 // When schedule(dynamic,chunk_size) is specified, the iterations are 2445 // distributed to threads in the team in chunks as the threads request them. 2446 // Each thread executes a chunk of iterations, then requests another chunk, 2447 // until no chunks remain to be distributed. Each chunk contains chunk_size 2448 // iterations, except for the last chunk to be distributed, which may have 2449 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2450 // 2451 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2452 // to threads in the team in chunks as the executing threads request them. 2453 // Each thread executes a chunk of iterations, then requests another chunk, 2454 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2455 // each chunk is proportional to the number of unassigned iterations divided 2456 // by the number of threads in the team, decreasing to 1. For a chunk_size 2457 // with value k (greater than 1), the size of each chunk is determined in the 2458 // same way, with the restriction that the chunks do not contain fewer than k 2459 // iterations (except for the last chunk to be assigned, which may have fewer 2460 // than k iterations). 2461 // 2462 // When schedule(auto) is specified, the decision regarding scheduling is 2463 // delegated to the compiler and/or runtime system. The programmer gives the 2464 // implementation the freedom to choose any possible mapping of iterations to 2465 // threads in the team. 
2466 //
2467 // When schedule(runtime) is specified, the decision regarding scheduling is
2468 // deferred until run time, and the schedule and chunk size are taken from the
2469 // run-sched-var ICV. If the ICV is set to auto, the schedule is
2470 // implementation defined.
2471 //
2472 // while(__kmpc_dispatch_next(&LB, &UB)) {
2473 //   idx = LB;
2474 //   while (idx <= UB) { BODY; ++idx;
2475 //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
2476 //   } // inner loop
2477 // }
2478 //
2479 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2480 // When schedule(static, chunk_size) is specified, iterations are divided into
2481 // chunks of size chunk_size, and the chunks are assigned to the threads in
2482 // the team in a round-robin fashion in the order of the thread number.
2483 //
2484 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
2485 //   while (idx <= UB) { BODY; ++idx; } // inner loop
2486 //   LB = LB + ST;
2487 //   UB = UB + ST;
2488 // }
2489 //
2490
2491 const Expr *IVExpr = S.getIterationVariable();
2492 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2493 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2494
2495 if (DynamicOrOrdered) {
2496 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
2497 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
2498 llvm::Value *LBVal = DispatchBounds.first;
2499 llvm::Value *UBVal = DispatchBounds.second;
2500 CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
2501 LoopArgs.Chunk};
2502 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
2503 IVSigned, Ordered, DispatchRTInputValues);
2504 } else {
2505 CGOpenMPRuntime::StaticRTInput StaticInit(
2506 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
2507 LoopArgs.ST, LoopArgs.Chunk);
2508 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
2509 ScheduleKind, StaticInit);
2510 }
2511
2512 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
2513 const unsigned IVSize,
2514 const bool IVSigned) {
2515 if (Ordered) {
2516 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
2517 IVSigned);
2518 }
2519 };
2520
2521 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
2522 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
2523 OuterLoopArgs.IncExpr = S.getInc();
2524 OuterLoopArgs.Init = S.getInit();
2525 OuterLoopArgs.Cond = S.getCond();
2526 OuterLoopArgs.NextLB = S.getNextLowerBound();
2527 OuterLoopArgs.NextUB = S.getNextUpperBound();
2528 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
2529 emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
2530 }
2531
2532 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
2533 const unsigned IVSize, const bool IVSigned) {}
2534
2535 void CodeGenFunction::EmitOMPDistributeOuterLoop(
2536 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
2537 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
2538 const CodeGenLoopTy &CodeGenLoopContent) {
2539
2540 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2541
2542 // Emit outer loop.
2543 // Same behavior as in EmitOMPForOuterLoop, except that the schedule cannot
2544 // be dynamic.
2545 //
2546
2547 const Expr *IVExpr = S.getIterationVariable();
2548 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2549 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2550
2551 CGOpenMPRuntime::StaticRTInput StaticInit(
2552 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2553 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2554 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2555
2556 // For combined 'distribute' and 'for', the increment expression of distribute
2557 // is stored in DistInc. For 'distribute' alone, it is in Inc.
2558 Expr *IncExpr;
2559 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2560 IncExpr = S.getDistInc();
2561 else
2562 IncExpr = S.getInc();
2563
2564 // This routine is shared by 'omp distribute parallel for' and
2565 // 'omp distribute': select the right EUB expression depending on the
2566 // directive.
2567 OMPLoopArguments OuterLoopArgs;
2568 OuterLoopArgs.LB = LoopArgs.LB;
2569 OuterLoopArgs.UB = LoopArgs.UB;
2570 OuterLoopArgs.ST = LoopArgs.ST;
2571 OuterLoopArgs.IL = LoopArgs.IL;
2572 OuterLoopArgs.Chunk = LoopArgs.Chunk;
2573 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2574 ? S.getCombinedEnsureUpperBound()
2575 : S.getEnsureUpperBound();
2576 OuterLoopArgs.IncExpr = IncExpr;
2577 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2578 ? S.getCombinedInit()
2579 : S.getInit();
2580 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2581 ? S.getCombinedCond()
2582 : S.getCond();
2583 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2584 ? S.getCombinedNextLowerBound()
2585 : S.getNextLowerBound();
2586 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2587 ? S.getCombinedNextUpperBound()
2588 : S.getNextUpperBound();
2589
2590 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2591 LoopScope, OuterLoopArgs, CodeGenLoopContent,
2592 emitEmptyOrdered);
2593 }
2594
2595 static std::pair<LValue, LValue>
2596 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2597 const OMPExecutableDirective &S) {
2598 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2599 LValue LB =
2600 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2601 LValue UB =
2602 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2603
2604 // When composing 'distribute' with 'for' (e.g. as in 'distribute
2605 // parallel for') we need to use the 'distribute'
2606 // chunk lower and upper bounds rather than the whole loop iteration
2607 // space. These are parameters to the outlined function for 'parallel'
2608 // and we copy the bounds of the previous schedule into the
2609 // current ones.
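// For illustration, under a hypothetical
//   #pragma omp distribute parallel for
// each 'parallel for' region iterates only over the [PrevLB, PrevUB]
// chunk handed down by the enclosing 'distribute' schedule.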
2610 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
2611 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
2612 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
2613 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
2614 PrevLBVal = CGF.EmitScalarConversion(
2615 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
2616 LS.getIterationVariable()->getType(),
2617 LS.getPrevLowerBoundVariable()->getExprLoc());
2618 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
2619 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
2620 PrevUBVal = CGF.EmitScalarConversion(
2621 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
2622 LS.getIterationVariable()->getType(),
2623 LS.getPrevUpperBoundVariable()->getExprLoc());
2624
2625 CGF.EmitStoreOfScalar(PrevLBVal, LB);
2626 CGF.EmitStoreOfScalar(PrevUBVal, UB);
2627
2628 return {LB, UB};
2629 }
2630
2631 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), we need
2632 /// to use the LB and UB expressions generated by the worksharing code
2633 /// generation support, whereas in non-combined situations we would just emit
2634 /// 0 and the LastIteration expression.
2635 /// This function is necessary due to the difference of the LB and UB
2636 /// types for the RT emission routines for 'for_static_init' and
2637 /// 'for_dispatch_init'.
2638 static std::pair<llvm::Value *, llvm::Value *>
2639 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
2640 const OMPExecutableDirective &S,
2641 Address LB, Address UB) {
2642 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2643 const Expr *IVExpr = LS.getIterationVariable();
2644 // When implementing a dynamic schedule for a 'for' combined with a
2645 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
2646 // is not normalized as each team only executes its own assigned
2647 // distribute chunk.
2648 QualType IteratorTy = IVExpr->getType();
2649 llvm::Value *LBVal =
2650 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2651 llvm::Value *UBVal =
2652 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
2653 return {LBVal, UBVal};
2654 }
2655
2656 static void emitDistributeParallelForDistributeInnerBoundParams(
2657 CodeGenFunction &CGF, const OMPExecutableDirective &S,
2658 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
2659 const auto &Dir = cast<OMPLoopDirective>(S);
2660 LValue LB =
2661 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
2662 llvm::Value *LBCast =
2663 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
2664 CGF.SizeTy, /*isSigned=*/false);
2665 CapturedVars.push_back(LBCast);
2666 LValue UB =
2667 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
2668
2669 llvm::Value *UBCast =
2670 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
2671 CGF.SizeTy, /*isSigned=*/false);
2672 CapturedVars.push_back(UBCast);
2673 }
2674
2675 static void
2676 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
2677 const OMPLoopDirective &S,
2678 CodeGenFunction::JumpDest LoopExit) {
2679 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
2680 PrePostActionTy &Action) {
2681 Action.Enter(CGF);
2682 bool HasCancel = false;
2683 if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
2684 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
2685 HasCancel = D->hasCancel();
2686 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
2687 HasCancel =
D->hasCancel(); 2688 else if (const auto *D = 2689 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2690 HasCancel = D->hasCancel(); 2691 } 2692 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2693 HasCancel); 2694 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2695 emitDistributeParallelForInnerBounds, 2696 emitDistributeParallelForDispatchBounds); 2697 }; 2698 2699 emitCommonOMPParallelDirective( 2700 CGF, S, 2701 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2702 CGInlinedWorksharingLoop, 2703 emitDistributeParallelForDistributeInnerBoundParams); 2704 } 2705 2706 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2707 const OMPDistributeParallelForDirective &S) { 2708 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2709 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2710 S.getDistInc()); 2711 }; 2712 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2713 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2714 } 2715 2716 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2717 const OMPDistributeParallelForSimdDirective &S) { 2718 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2719 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2720 S.getDistInc()); 2721 }; 2722 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2723 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2724 } 2725 2726 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2727 const OMPDistributeSimdDirective &S) { 2728 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2729 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2730 }; 2731 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2732 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2733 } 2734 2735 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2736 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2737 // Emit SPMD target parallel for region as a standalone region. 2738 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2739 emitOMPSimdRegion(CGF, S, Action); 2740 }; 2741 llvm::Function *Fn; 2742 llvm::Constant *Addr; 2743 // Emit target region as a standalone region. 2744 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 2745 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 2746 assert(Fn && Addr && "Target device function emission failed."); 2747 } 2748 2749 void CodeGenFunction::EmitOMPTargetSimdDirective( 2750 const OMPTargetSimdDirective &S) { 2751 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2752 emitOMPSimdRegion(CGF, S, Action); 2753 }; 2754 emitCommonOMPTargetDirective(*this, S, CodeGen); 2755 } 2756 2757 namespace { 2758 struct ScheduleKindModifiersTy { 2759 OpenMPScheduleClauseKind Kind; 2760 OpenMPScheduleClauseModifier M1; 2761 OpenMPScheduleClauseModifier M2; 2762 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2763 OpenMPScheduleClauseModifier M1, 2764 OpenMPScheduleClauseModifier M2) 2765 : Kind(Kind), M1(M1), M2(M2) {} 2766 }; 2767 } // namespace 2768 2769 bool CodeGenFunction::EmitOMPWorksharingLoop( 2770 const OMPLoopDirective &S, Expr *EUB, 2771 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2772 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2773 // Emit the loop iteration variable. 
2774 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2775 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2776 EmitVarDecl(*IVDecl); 2777 2778 // Emit the iterations count variable. 2779 // If it is not a variable, Sema decided to calculate iterations count on each 2780 // iteration (e.g., it is foldable into a constant). 2781 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2782 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2783 // Emit calculation of the iterations count. 2784 EmitIgnoredExpr(S.getCalcLastIteration()); 2785 } 2786 2787 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2788 2789 bool HasLastprivateClause; 2790 // Check pre-condition. 2791 { 2792 OMPLoopScope PreInitScope(*this, S); 2793 // Skip the entire loop if we don't meet the precondition. 2794 // If the condition constant folds and can be elided, avoid emitting the 2795 // whole loop. 2796 bool CondConstant; 2797 llvm::BasicBlock *ContBlock = nullptr; 2798 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2799 if (!CondConstant) 2800 return false; 2801 } else { 2802 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2803 ContBlock = createBasicBlock("omp.precond.end"); 2804 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2805 getProfileCount(&S)); 2806 EmitBlock(ThenBlock); 2807 incrementProfileCounter(&S); 2808 } 2809 2810 RunCleanupsScope DoacrossCleanupScope(*this); 2811 bool Ordered = false; 2812 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2813 if (OrderedClause->getNumForLoops()) 2814 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2815 else 2816 Ordered = true; 2817 } 2818 2819 llvm::DenseSet<const Expr *> EmittedFinals; 2820 emitAlignedClause(*this, S); 2821 bool HasLinears = EmitOMPLinearClauseInit(S); 2822 // Emit helper vars inits. 2823 2824 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2825 LValue LB = Bounds.first; 2826 LValue UB = Bounds.second; 2827 LValue ST = 2828 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2829 LValue IL = 2830 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2831 2832 // Emit 'then' code. 2833 { 2834 OMPPrivateScope LoopScope(*this); 2835 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2836 // Emit implicit barrier to synchronize threads and avoid data races on 2837 // initialization of firstprivate variables and post-update of 2838 // lastprivate variables. 2839 CGM.getOpenMPRuntime().emitBarrierCall( 2840 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2841 /*ForceSimpleCall=*/true); 2842 } 2843 EmitOMPPrivateClause(S, LoopScope); 2844 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2845 *this, S, EmitLValue(S.getIterationVariable())); 2846 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2847 EmitOMPReductionClauseInit(S, LoopScope); 2848 EmitOMPPrivateLoopCounters(S, LoopScope); 2849 EmitOMPLinearClause(S, LoopScope); 2850 (void)LoopScope.Privatize(); 2851 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2852 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2853 2854 // Detect the loop schedule kind and chunk. 
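// For illustration, a hypothetical clause like
//   #pragma omp for schedule(dynamic, 4)
// yields OMPC_SCHEDULE_dynamic with a chunk expression of 4, while the
// absence of a schedule clause falls back to the runtime default below.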
2855 const Expr *ChunkExpr = nullptr; 2856 OpenMPScheduleTy ScheduleKind; 2857 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) { 2858 ScheduleKind.Schedule = C->getScheduleKind(); 2859 ScheduleKind.M1 = C->getFirstScheduleModifier(); 2860 ScheduleKind.M2 = C->getSecondScheduleModifier(); 2861 ChunkExpr = C->getChunkSize(); 2862 } else { 2863 // Default behaviour for schedule clause. 2864 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk( 2865 *this, S, ScheduleKind.Schedule, ChunkExpr); 2866 } 2867 bool HasChunkSizeOne = false; 2868 llvm::Value *Chunk = nullptr; 2869 if (ChunkExpr) { 2870 Chunk = EmitScalarExpr(ChunkExpr); 2871 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(), 2872 S.getIterationVariable()->getType(), 2873 S.getBeginLoc()); 2874 Expr::EvalResult Result; 2875 if (ChunkExpr->EvaluateAsInt(Result, getContext())) { 2876 llvm::APSInt EvaluatedChunk = Result.Val.getInt(); 2877 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1); 2878 } 2879 } 2880 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2881 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2882 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 2883 // If the static schedule kind is specified or if the ordered clause is 2884 // specified, and if no monotonic modifier is specified, the effect will 2885 // be as if the monotonic modifier was specified. 2886 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule, 2887 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne && 2888 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 2889 if ((RT.isStaticNonchunked(ScheduleKind.Schedule, 2890 /* Chunked */ Chunk != nullptr) || 2891 StaticChunkedOne) && 2892 !Ordered) { 2893 JumpDest LoopExit = 2894 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 2895 emitCommonSimdLoop( 2896 *this, S, 2897 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2898 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2899 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 2900 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) { 2901 if (C->getKind() == OMPC_ORDER_concurrent) 2902 CGF.LoopStack.setParallel(/*Enable=*/true); 2903 } 2904 }, 2905 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk, 2906 &S, ScheduleKind, LoopExit, 2907 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2908 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2909 // When no chunk_size is specified, the iteration space is divided 2910 // into chunks that are approximately equal in size, and at most 2911 // one chunk is distributed to each thread. Note that the size of 2912 // the chunks is unspecified in this case. 2913 CGOpenMPRuntime::StaticRTInput StaticInit( 2914 IVSize, IVSigned, Ordered, IL.getAddress(CGF), 2915 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF), 2916 StaticChunkedOne ? 
Chunk : nullptr); 2917 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2918 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 2919 StaticInit); 2920 // UB = min(UB, GlobalUB); 2921 if (!StaticChunkedOne) 2922 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 2923 // IV = LB; 2924 CGF.EmitIgnoredExpr(S.getInit()); 2925 // For unchunked static schedule generate: 2926 // 2927 // while (idx <= UB) { 2928 // BODY; 2929 // ++idx; 2930 // } 2931 // 2932 // For static schedule with chunk one: 2933 // 2934 // while (IV <= PrevUB) { 2935 // BODY; 2936 // IV += ST; 2937 // } 2938 CGF.EmitOMPInnerLoop( 2939 S, LoopScope.requiresCleanups(), 2940 StaticChunkedOne ? S.getCombinedParForInDistCond() 2941 : S.getCond(), 2942 StaticChunkedOne ? S.getDistInc() : S.getInc(), 2943 [&S, LoopExit](CodeGenFunction &CGF) { 2944 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 2945 }, 2946 [](CodeGenFunction &) {}); 2947 }); 2948 EmitBlock(LoopExit.getBlock()); 2949 // Tell the runtime we are done. 2950 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2951 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2952 S.getDirectiveKind()); 2953 }; 2954 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2955 } else { 2956 const bool IsMonotonic = 2957 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static || 2958 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown || 2959 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 2960 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 2961 // Emit the outer loop, which requests its work chunk [LB..UB] from 2962 // runtime and runs the inner loop to process it. 2963 const OMPLoopArguments LoopArguments( 2964 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 2965 IL.getAddress(*this), Chunk, EUB); 2966 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2967 LoopArguments, CGDispatchBounds); 2968 } 2969 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2970 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 2971 return CGF.Builder.CreateIsNotNull( 2972 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2973 }); 2974 } 2975 EmitOMPReductionClauseFinal( 2976 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2977 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2978 : /*Parallel only*/ OMPD_parallel); 2979 // Emit post-update of the reduction variables if IsLastIter != 0. 2980 emitPostUpdateForReductionClause( 2981 *this, S, [IL, &S](CodeGenFunction &CGF) { 2982 return CGF.Builder.CreateIsNotNull( 2983 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2984 }); 2985 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2986 if (HasLastprivateClause) 2987 EmitOMPLastprivateClauseFinal( 2988 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2989 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 2990 } 2991 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 2992 return CGF.Builder.CreateIsNotNull( 2993 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2994 }); 2995 DoacrossCleanupScope.ForceCleanup(); 2996 // We're now done with the loop, so jump to the continuation block. 2997 if (ContBlock) { 2998 EmitBranch(ContBlock); 2999 EmitBlock(ContBlock, /*IsFinished=*/true); 3000 } 3001 } 3002 return HasLastprivateClause; 3003 } 3004 3005 /// The following two functions generate expressions for the loop lower 3006 /// and upper bounds in case of static and dynamic (dispatch) schedule 3007 /// of the associated 'for' or 'distribute' loop. 
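/// Illustration (informal): for static schedules the runtime adjusts the
/// LB/UB helper variables emitted here, while for dispatch schedules the
/// runtime hands out chunks from the constant interval [0, NumIterations)
/// computed below in emitDispatchForLoopBounds.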
3008 static std::pair<LValue, LValue>
3009 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3010   const auto &LS = cast<OMPLoopDirective>(S);
3011   LValue LB =
3012       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3013   LValue UB =
3014       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3015   return {LB, UB};
3016 }
3017 
3018 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3019 /// consider the lower and upper bound expressions generated by the
3020 /// worksharing loop support, but use 0 and the iteration space size as
3021 /// constants.
3022 static std::pair<llvm::Value *, llvm::Value *>
3023 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3024                           Address LB, Address UB) {
3025   const auto &LS = cast<OMPLoopDirective>(S);
3026   const Expr *IVExpr = LS.getIterationVariable();
3027   const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3028   llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3029   llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3030   return {LBVal, UBVal};
3031 }
3032 
3033 /// Emits the code for the directive with inscan reductions.
3034 /// The code is the following:
3035 /// \code
3036 /// size num_iters = <num_iters>;
3037 /// <type> buffer[num_iters];
3038 /// #pragma omp ...
3039 /// for (i: 0..<num_iters>) {
3040 ///   <input phase>;
3041 ///   buffer[i] = red;
3042 /// }
3043 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3044 /// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3045 ///   buffer[cnt] op= buffer[cnt-pow(2,k)];
3046 /// #pragma omp ...
3047 /// for (0..<num_iters>) {
3048 ///   red = InclusiveScan ? buffer[i] : buffer[i-1];
3049 ///   <scan phase>;
3050 /// }
3051 /// \endcode
3052 static void emitScanBasedDirective(
3053     CodeGenFunction &CGF, const OMPLoopDirective &S,
3054     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3055     llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3056     llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3057   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3058       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3059   SmallVector<const Expr *, 4> Shareds;
3060   SmallVector<const Expr *, 4> Privates;
3061   SmallVector<const Expr *, 4> ReductionOps;
3062   SmallVector<const Expr *, 4> LHSs;
3063   SmallVector<const Expr *, 4> RHSs;
3064   SmallVector<const Expr *, 4> CopyOps;
3065   SmallVector<const Expr *, 4> CopyArrayTemps;
3066   SmallVector<const Expr *, 4> CopyArrayElems;
3067   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3068     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3069            "Only inscan reductions are expected.");
3070     Shareds.append(C->varlist_begin(), C->varlist_end());
3071     Privates.append(C->privates().begin(), C->privates().end());
3072     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3073     LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3074     RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3075     CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3076     CopyArrayTemps.append(C->copy_array_temps().begin(),
3077                           C->copy_array_temps().end());
3078     CopyArrayElems.append(C->copy_array_elems().begin(),
3079                           C->copy_array_elems().end());
3080   }
3081   {
3082     // Emit buffers for each reduction variable.
3083     // ReductionCodeGen is required to emit the code for array
3084     // reductions correctly.
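    // Sketch of the intent: for 'reduction(inscan, +: x)' the copy-array temp
    // is a VLA '<type> buffer[num_iters]', and the mapping below binds its
    // size expression to OMPScanNumIterations before the buffer is emitted.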
3085     ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3086     unsigned Count = 0;
3087     auto *ITA = CopyArrayTemps.begin();
3088     for (const Expr *IRef : Privates) {
3089       const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3090       // Emit variably modified arrays, used for arrays/array sections
3091       // reductions.
3092       if (PrivateVD->getType()->isVariablyModifiedType()) {
3093         RedCG.emitSharedOrigLValue(CGF, Count);
3094         RedCG.emitAggregateType(CGF, Count);
3095       }
3096       CodeGenFunction::OpaqueValueMapping DimMapping(
3097           CGF,
3098           cast<OpaqueValueExpr>(
3099               cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3100                   ->getSizeExpr()),
3101           RValue::get(OMPScanNumIterations));
3102       // Emit temp buffer.
3103       CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3104       ++ITA;
3105       ++Count;
3106     }
3107   }
3108   CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3109   {
3110     // Emit loop with input phase:
3111     // #pragma omp ...
3112     // for (i: 0..<num_iters>) {
3113     //   <input phase>;
3114     //   buffer[i] = red;
3115     // }
3116     CGF.OMPFirstScanLoop = true;
3117     CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3118     FirstGen(CGF);
3119   }
3120   // Emit prefix reduction:
3121   // for (int k = 0; k != ceil(log2(n)); ++k)
3122   llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3123   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3124   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3125   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3126   llvm::Value *Arg =
3127       CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3128   llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3129   F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3130   LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3131   LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3132   llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3133       OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3134   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3135   CGF.EmitBlock(LoopBB);
3136   auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3137   // size pow2k = 1;
3138   auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3139   Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3140   Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3141   // for (size i = n - 1; i >= 2 ^ k; --i)
3142   //   tmp[i] op= tmp[i-pow2k];
3143   llvm::BasicBlock *InnerLoopBB =
3144       CGF.createBasicBlock("omp.inner.log.scan.body");
3145   llvm::BasicBlock *InnerExitBB =
3146       CGF.createBasicBlock("omp.inner.log.scan.exit");
3147   llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3148   CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3149   CGF.EmitBlock(InnerLoopBB);
3150   auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3151   IVal->addIncoming(NMin1, LoopBB);
3152   {
3153     CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3154     auto *ILHS = LHSs.begin();
3155     auto *IRHS = RHSs.begin();
3156     for (const Expr *CopyArrayElem : CopyArrayElems) {
3157       const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3158       const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3159       Address LHSAddr = Address::invalid();
3160       {
3161         CodeGenFunction::OpaqueValueMapping IdxMapping(
3162             CGF,
3163             cast<OpaqueValueExpr>(
3164                 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3165             RValue::get(IVal));
3166         LHSAddr =
CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3167 } 3168 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3169 Address RHSAddr = Address::invalid(); 3170 { 3171 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3172 CodeGenFunction::OpaqueValueMapping IdxMapping( 3173 CGF, 3174 cast<OpaqueValueExpr>( 3175 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3176 RValue::get(OffsetIVal)); 3177 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3178 } 3179 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3180 ++ILHS; 3181 ++IRHS; 3182 } 3183 PrivScope.Privatize(); 3184 CGF.CGM.getOpenMPRuntime().emitReduction( 3185 CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 3186 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3187 } 3188 llvm::Value *NextIVal = 3189 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3190 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3191 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3192 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3193 CGF.EmitBlock(InnerExitBB); 3194 llvm::Value *Next = 3195 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3196 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3197 // pow2k <<= 1; 3198 llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3199 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3200 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3201 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3202 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3203 CGF.EmitBlock(ExitBB); 3204 3205 CGF.OMPFirstScanLoop = false; 3206 SecondGen(CGF); 3207 } 3208 3209 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3210 bool HasLastprivates = false; 3211 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3212 PrePostActionTy &) { 3213 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3214 [](const OMPReductionClause *C) { 3215 return C->getModifier() == OMPC_REDUCTION_inscan; 3216 })) { 3217 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3218 OMPLocalDeclMapRAII Scope(CGF); 3219 OMPLoopScope LoopScope(CGF, S); 3220 return CGF.EmitScalarExpr(S.getNumIterations()); 3221 }; 3222 const auto &&FirstGen = [&S](CodeGenFunction &CGF) { 3223 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); 3224 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3225 emitForLoopBounds, 3226 emitDispatchForLoopBounds); 3227 // Emit an implicit barrier at the end. 
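      // (The barrier matters here: every thread must have written its buffer
      // elements in the input phase before any thread reads them in the
      // cross-thread prefix-reduction loop emitted next.)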
3228 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3229 OMPD_for); 3230 }; 3231 const auto &&SecondGen = [&S, &HasLastprivates](CodeGenFunction &CGF) { 3232 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); 3233 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3234 emitForLoopBounds, 3235 emitDispatchForLoopBounds); 3236 }; 3237 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3238 } else { 3239 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); 3240 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3241 emitForLoopBounds, 3242 emitDispatchForLoopBounds); 3243 } 3244 }; 3245 { 3246 auto LPCRegion = 3247 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3248 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3249 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3250 S.hasCancel()); 3251 } 3252 3253 // Emit an implicit barrier at the end. 3254 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3255 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3256 // Check for outer lastprivate conditional update. 3257 checkForLastprivateConditionalUpdate(*this, S); 3258 } 3259 3260 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3261 bool HasLastprivates = false; 3262 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3263 PrePostActionTy &) { 3264 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3265 emitForLoopBounds, 3266 emitDispatchForLoopBounds); 3267 }; 3268 { 3269 auto LPCRegion = 3270 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3271 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3272 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3273 } 3274 3275 // Emit an implicit barrier at the end. 3276 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3277 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3278 // Check for outer lastprivate conditional update. 3279 checkForLastprivateConditionalUpdate(*this, S); 3280 } 3281 3282 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3283 const Twine &Name, 3284 llvm::Value *Init = nullptr) { 3285 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3286 if (Init) 3287 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3288 return LVal; 3289 } 3290 3291 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3292 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3293 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3294 bool HasLastprivates = false; 3295 auto &&CodeGen = [&S, CapturedStmt, CS, 3296 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3297 const ASTContext &C = CGF.getContext(); 3298 QualType KmpInt32Ty = 3299 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3300 // Emit helper vars inits. 3301 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3302 CGF.Builder.getInt32(0)); 3303 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3304 ? 
CGF.Builder.getInt32(CS->size() - 1) 3305 : CGF.Builder.getInt32(0); 3306 LValue UB = 3307 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3308 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3309 CGF.Builder.getInt32(1)); 3310 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3311 CGF.Builder.getInt32(0)); 3312 // Loop counter. 3313 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3314 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3315 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3316 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3317 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3318 // Generate condition for loop. 3319 BinaryOperator *Cond = BinaryOperator::Create( 3320 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary, 3321 S.getBeginLoc(), FPOptions(C.getLangOpts())); 3322 // Increment for loop counter. 3323 UnaryOperator *Inc = UnaryOperator::Create( 3324 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 3325 S.getBeginLoc(), true, FPOptions(C.getLangOpts())); 3326 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3327 // Iterate through all sections and emit a switch construct: 3328 // switch (IV) { 3329 // case 0: 3330 // <SectionStmt[0]>; 3331 // break; 3332 // ... 3333 // case <NumSection> - 1: 3334 // <SectionStmt[<NumSection> - 1]>; 3335 // break; 3336 // } 3337 // .omp.sections.exit: 3338 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3339 llvm::SwitchInst *SwitchStmt = 3340 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3341 ExitBB, CS == nullptr ? 1 : CS->size()); 3342 if (CS) { 3343 unsigned CaseNumber = 0; 3344 for (const Stmt *SubStmt : CS->children()) { 3345 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3346 CGF.EmitBlock(CaseBB); 3347 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3348 CGF.EmitStmt(SubStmt); 3349 CGF.EmitBranch(ExitBB); 3350 ++CaseNumber; 3351 } 3352 } else { 3353 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3354 CGF.EmitBlock(CaseBB); 3355 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3356 CGF.EmitStmt(CapturedStmt); 3357 CGF.EmitBranch(ExitBB); 3358 } 3359 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3360 }; 3361 3362 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3363 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3364 // Emit implicit barrier to synchronize threads and avoid data races on 3365 // initialization of firstprivate variables and post-update of lastprivate 3366 // variables. 3367 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3368 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3369 /*ForceSimpleCall=*/true); 3370 } 3371 CGF.EmitOMPPrivateClause(S, LoopScope); 3372 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3373 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3374 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3375 (void)LoopScope.Privatize(); 3376 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3377 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3378 3379 // Emit static non-chunked loop. 
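    // Illustrative shape of the lowering: N section statements become a
    // statically scheduled loop over IV in [0, N-1], whose body is the switch
    // built in BodyGen above, dispatching IV to the matching section.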
3380 OpenMPScheduleTy ScheduleKind; 3381 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3382 CGOpenMPRuntime::StaticRTInput StaticInit( 3383 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3384 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3385 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3386 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3387 // UB = min(UB, GlobalUB); 3388 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3389 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3390 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3391 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3392 // IV = LB; 3393 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3394 // while (idx <= UB) { BODY; ++idx; } 3395 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3396 [](CodeGenFunction &) {}); 3397 // Tell the runtime we are done. 3398 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3399 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3400 S.getDirectiveKind()); 3401 }; 3402 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3403 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3404 // Emit post-update of the reduction variables if IsLastIter != 0. 3405 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3406 return CGF.Builder.CreateIsNotNull( 3407 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3408 }); 3409 3410 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3411 if (HasLastprivates) 3412 CGF.EmitOMPLastprivateClauseFinal( 3413 S, /*NoFinals=*/false, 3414 CGF.Builder.CreateIsNotNull( 3415 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3416 }; 3417 3418 bool HasCancel = false; 3419 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3420 HasCancel = OSD->hasCancel(); 3421 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3422 HasCancel = OPSD->hasCancel(); 3423 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3424 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3425 HasCancel); 3426 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3427 // clause. Otherwise the barrier will be generated by the codegen for the 3428 // directive. 3429 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3430 // Emit implicit barrier to synchronize threads and avoid data races on 3431 // initialization of firstprivate variables. 3432 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3433 OMPD_unknown); 3434 } 3435 } 3436 3437 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3438 { 3439 auto LPCRegion = 3440 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3441 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3442 EmitSections(S); 3443 } 3444 // Emit an implicit barrier at the end. 3445 if (!S.getSingleClause<OMPNowaitClause>()) { 3446 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3447 OMPD_sections); 3448 } 3449 // Check for outer lastprivate conditional update. 
3450 checkForLastprivateConditionalUpdate(*this, S); 3451 } 3452 3453 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3454 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3455 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3456 }; 3457 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3458 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen, 3459 S.hasCancel()); 3460 } 3461 3462 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3463 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3464 llvm::SmallVector<const Expr *, 8> DestExprs; 3465 llvm::SmallVector<const Expr *, 8> SrcExprs; 3466 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3467 // Check if there are any 'copyprivate' clauses associated with this 3468 // 'single' construct. 3469 // Build a list of copyprivate variables along with helper expressions 3470 // (<source>, <destination>, <destination>=<source> expressions) 3471 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3472 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3473 DestExprs.append(C->destination_exprs().begin(), 3474 C->destination_exprs().end()); 3475 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3476 AssignmentOps.append(C->assignment_ops().begin(), 3477 C->assignment_ops().end()); 3478 } 3479 // Emit code for 'single' region along with 'copyprivate' clauses 3480 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3481 Action.Enter(CGF); 3482 OMPPrivateScope SingleScope(CGF); 3483 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3484 CGF.EmitOMPPrivateClause(S, SingleScope); 3485 (void)SingleScope.Privatize(); 3486 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3487 }; 3488 { 3489 auto LPCRegion = 3490 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3491 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3492 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3493 CopyprivateVars, DestExprs, 3494 SrcExprs, AssignmentOps); 3495 } 3496 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3497 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3498 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3499 CGM.getOpenMPRuntime().emitBarrierCall( 3500 *this, S.getBeginLoc(), 3501 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3502 } 3503 // Check for outer lastprivate conditional update. 
3504 checkForLastprivateConditionalUpdate(*this, S); 3505 } 3506 3507 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3508 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3509 Action.Enter(CGF); 3510 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3511 }; 3512 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3513 } 3514 3515 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3516 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3517 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3518 3519 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3520 const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt(); 3521 3522 auto FiniCB = [this](InsertPointTy IP) { 3523 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3524 }; 3525 3526 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3527 InsertPointTy CodeGenIP, 3528 llvm::BasicBlock &FiniBB) { 3529 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3530 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3531 CodeGenIP, FiniBB); 3532 }; 3533 3534 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3535 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3536 Builder.restoreIP(OMPBuilder->CreateMaster(Builder, BodyGenCB, FiniCB)); 3537 3538 return; 3539 } 3540 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3541 emitMaster(*this, S); 3542 } 3543 3544 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 3545 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3546 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3547 3548 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3549 const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt(); 3550 const Expr *Hint = nullptr; 3551 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3552 Hint = HintClause->getHint(); 3553 3554 // TODO: This is slightly different from what's currently being done in 3555 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 3556 // about typing is final. 
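    // Illustrative use (hypothetical source; the hint constant is assumed to
    // come from <omp.h>):
    //
    //   #pragma omp critical (lock1) hint(omp_sync_hint_contended)
    //
    // The hint expression is evaluated once and passed to the runtime as an
    // integer argument below.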
3557 llvm::Value *HintInst = nullptr; 3558 if (Hint) 3559 HintInst = 3560 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 3561 3562 auto FiniCB = [this](InsertPointTy IP) { 3563 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3564 }; 3565 3566 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 3567 InsertPointTy CodeGenIP, 3568 llvm::BasicBlock &FiniBB) { 3569 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3570 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 3571 CodeGenIP, FiniBB); 3572 }; 3573 3574 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3575 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3576 Builder.restoreIP(OMPBuilder->CreateCritical( 3577 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 3578 HintInst)); 3579 3580 return; 3581 } 3582 3583 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3584 Action.Enter(CGF); 3585 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3586 }; 3587 const Expr *Hint = nullptr; 3588 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3589 Hint = HintClause->getHint(); 3590 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3591 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 3592 S.getDirectiveName().getAsString(), 3593 CodeGen, S.getBeginLoc(), Hint); 3594 } 3595 3596 void CodeGenFunction::EmitOMPParallelForDirective( 3597 const OMPParallelForDirective &S) { 3598 // Emit directive as a combined directive that consists of two implicit 3599 // directives: 'parallel' with 'for' directive. 3600 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3601 Action.Enter(CGF); 3602 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel()); 3603 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 3604 emitDispatchForLoopBounds); 3605 }; 3606 { 3607 auto LPCRegion = 3608 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3609 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 3610 emitEmptyBoundParameters); 3611 } 3612 // Check for outer lastprivate conditional update. 3613 checkForLastprivateConditionalUpdate(*this, S); 3614 } 3615 3616 void CodeGenFunction::EmitOMPParallelForSimdDirective( 3617 const OMPParallelForSimdDirective &S) { 3618 // Emit directive as a combined directive that consists of two implicit 3619 // directives: 'parallel' with 'for' directive. 3620 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3621 Action.Enter(CGF); 3622 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 3623 emitDispatchForLoopBounds); 3624 }; 3625 { 3626 auto LPCRegion = 3627 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3628 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen, 3629 emitEmptyBoundParameters); 3630 } 3631 // Check for outer lastprivate conditional update. 3632 checkForLastprivateConditionalUpdate(*this, S); 3633 } 3634 3635 void CodeGenFunction::EmitOMPParallelMasterDirective( 3636 const OMPParallelMasterDirective &S) { 3637 // Emit directive as a combined directive that consists of two implicit 3638 // directives: 'parallel' with 'master' directive. 
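  // Roughly, '#pragma omp parallel master' is emitted as a 'parallel' region
  // whose body is wrapped in a 'master' region; data-sharing clauses are
  // handled on the 'parallel' side in the lambda below.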
3639   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3640     Action.Enter(CGF);
3641     OMPPrivateScope PrivateScope(CGF);
3642     bool Copyins = CGF.EmitOMPCopyinClause(S);
3643     (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3644     if (Copyins) {
3645       // Emit implicit barrier to synchronize threads and avoid data races on
3646       // propagation of the master thread's values of threadprivate variables
3647       // to the local instances of those variables in all other implicit threads.
3648       CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3649           CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3650           /*ForceSimpleCall=*/true);
3651     }
3652     CGF.EmitOMPPrivateClause(S, PrivateScope);
3653     CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3654     (void)PrivateScope.Privatize();
3655     emitMaster(CGF, S);
3656     CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
3657   };
3658   {
3659     auto LPCRegion =
3660         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3661     emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
3662                                    emitEmptyBoundParameters);
3663     emitPostUpdateForReductionClause(*this, S,
3664                                      [](CodeGenFunction &) { return nullptr; });
3665   }
3666   // Check for outer lastprivate conditional update.
3667   checkForLastprivateConditionalUpdate(*this, S);
3668 }
3669 
3670 void CodeGenFunction::EmitOMPParallelSectionsDirective(
3671     const OMPParallelSectionsDirective &S) {
3672   // Emit directive as a combined directive that consists of two implicit
3673   // directives: 'parallel' with 'sections' directive.
3674   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3675     Action.Enter(CGF);
3676     CGF.EmitSections(S);
3677   };
3678   {
3679     auto LPCRegion =
3680         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3681     emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
3682                                    emitEmptyBoundParameters);
3683   }
3684   // Check for outer lastprivate conditional update.
3685   checkForLastprivateConditionalUpdate(*this, S);
3686 }
3687 
3688 void CodeGenFunction::EmitOMPTaskBasedDirective(
3689     const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
3690     const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
3691     OMPTaskDataTy &Data) {
3692   // Emit outlined function for task construct.
3693   const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
3694   auto I = CS->getCapturedDecl()->param_begin();
3695   auto PartId = std::next(I);
3696   auto TaskT = std::next(I, 4);
3697   // Check if the task is final.
3698   if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
3699     // If the condition constant folds and can be elided, try to avoid emitting
3700     // the condition and the dead arm of the if/else.
3701     const Expr *Cond = Clause->getCondition();
3702     bool CondConstant;
3703     if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
3704       Data.Final.setInt(CondConstant);
3705     else
3706       Data.Final.setPointer(EvaluateExprAsBool(Cond));
3707   } else {
3708     // By default the task is not final.
3709     Data.Final.setInt(/*IntVal=*/false);
3710   }
3711   // Check if the task has a 'priority' clause.
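  // E.g. (sketch) '#pragma omp task priority(p)': 'p' is evaluated, converted
  // to a signed 32-bit value for the runtime, and the flag records that a
  // priority was given at all.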
3712 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 3713 const Expr *Prio = Clause->getPriority(); 3714 Data.Priority.setInt(/*IntVal=*/true); 3715 Data.Priority.setPointer(EmitScalarConversion( 3716 EmitScalarExpr(Prio), Prio->getType(), 3717 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 3718 Prio->getExprLoc())); 3719 } 3720 // The first function argument for tasks is a thread id, the second one is a 3721 // part id (0 for tied tasks, >=0 for untied task). 3722 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 3723 // Get list of private variables. 3724 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 3725 auto IRef = C->varlist_begin(); 3726 for (const Expr *IInit : C->private_copies()) { 3727 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3728 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3729 Data.PrivateVars.push_back(*IRef); 3730 Data.PrivateCopies.push_back(IInit); 3731 } 3732 ++IRef; 3733 } 3734 } 3735 EmittedAsPrivate.clear(); 3736 // Get list of firstprivate variables. 3737 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3738 auto IRef = C->varlist_begin(); 3739 auto IElemInitRef = C->inits().begin(); 3740 for (const Expr *IInit : C->private_copies()) { 3741 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3742 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3743 Data.FirstprivateVars.push_back(*IRef); 3744 Data.FirstprivateCopies.push_back(IInit); 3745 Data.FirstprivateInits.push_back(*IElemInitRef); 3746 } 3747 ++IRef; 3748 ++IElemInitRef; 3749 } 3750 } 3751 // Get list of lastprivate variables (for taskloops). 3752 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 3753 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 3754 auto IRef = C->varlist_begin(); 3755 auto ID = C->destination_exprs().begin(); 3756 for (const Expr *IInit : C->private_copies()) { 3757 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3758 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3759 Data.LastprivateVars.push_back(*IRef); 3760 Data.LastprivateCopies.push_back(IInit); 3761 } 3762 LastprivateDstsOrigs.insert( 3763 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 3764 cast<DeclRefExpr>(*IRef)}); 3765 ++IRef; 3766 ++ID; 3767 } 3768 } 3769 SmallVector<const Expr *, 4> LHSs; 3770 SmallVector<const Expr *, 4> RHSs; 3771 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3772 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 3773 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 3774 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 3775 Data.ReductionOps.append(C->reduction_ops().begin(), 3776 C->reduction_ops().end()); 3777 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 3778 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 3779 } 3780 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 3781 *this, S.getBeginLoc(), LHSs, RHSs, Data); 3782 // Build list of dependences. 
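  // Each 'depend' clause becomes one DependData record; e.g. (sketch)
  // 'depend(in: a, b)' appends an entry with kind OMPC_DEPEND_in and the
  // expression list {a, b}.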
3783 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 3784 OMPTaskDataTy::DependData &DD = 3785 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 3786 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 3787 } 3788 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 3789 CapturedRegion](CodeGenFunction &CGF, 3790 PrePostActionTy &Action) { 3791 // Set proper addresses for generated private copies. 3792 OMPPrivateScope Scope(CGF); 3793 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 3794 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 3795 !Data.LastprivateVars.empty()) { 3796 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 3797 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 3798 enum { PrivatesParam = 2, CopyFnParam = 3 }; 3799 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 3800 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 3801 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 3802 CS->getCapturedDecl()->getParam(PrivatesParam))); 3803 // Map privates. 3804 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 3805 llvm::SmallVector<llvm::Value *, 16> CallArgs; 3806 CallArgs.push_back(PrivatesPtr); 3807 for (const Expr *E : Data.PrivateVars) { 3808 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3809 Address PrivatePtr = CGF.CreateMemTemp( 3810 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 3811 PrivatePtrs.emplace_back(VD, PrivatePtr); 3812 CallArgs.push_back(PrivatePtr.getPointer()); 3813 } 3814 for (const Expr *E : Data.FirstprivateVars) { 3815 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3816 Address PrivatePtr = 3817 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3818 ".firstpriv.ptr.addr"); 3819 PrivatePtrs.emplace_back(VD, PrivatePtr); 3820 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 3821 CallArgs.push_back(PrivatePtr.getPointer()); 3822 } 3823 for (const Expr *E : Data.LastprivateVars) { 3824 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3825 Address PrivatePtr = 3826 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3827 ".lastpriv.ptr.addr"); 3828 PrivatePtrs.emplace_back(VD, PrivatePtr); 3829 CallArgs.push_back(PrivatePtr.getPointer()); 3830 } 3831 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 3832 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 3833 for (const auto &Pair : LastprivateDstsOrigs) { 3834 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 3835 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 3836 /*RefersToEnclosingVariableOrCapture=*/ 3837 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 3838 Pair.second->getType(), VK_LValue, 3839 Pair.second->getExprLoc()); 3840 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 3841 return CGF.EmitLValue(&DRE).getAddress(CGF); 3842 }); 3843 } 3844 for (const auto &Pair : PrivatePtrs) { 3845 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3846 CGF.getContext().getDeclAlign(Pair.first)); 3847 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 3848 } 3849 } 3850 if (Data.Reductions) { 3851 OMPPrivateScope FirstprivateScope(CGF); 3852 for (const auto &Pair : FirstprivatePtrs) { 3853 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3854 CGF.getContext().getDeclAlign(Pair.first)); 3855 FirstprivateScope.addPrivate(Pair.first, 3856 [Replacement]() { return Replacement; 
});
3857     }
3858     (void)FirstprivateScope.Privatize();
3859     OMPLexicalScope LexScope(CGF, S, CapturedRegion);
3860     ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
3861                            Data.ReductionCopies, Data.ReductionOps);
3862     llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
3863         CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
3864     for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
3865       RedCG.emitSharedOrigLValue(CGF, Cnt);
3866       RedCG.emitAggregateType(CGF, Cnt);
3867       // FIXME: This must be removed once the runtime library is fixed.
3868       // Emit required threadprivate variables for
3869       // initializer/combiner/finalizer.
3870       CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3871                                                          RedCG, Cnt);
3872       Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3873           CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3874       Replacement =
3875           Address(CGF.EmitScalarConversion(
3876                       Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3877                       CGF.getContext().getPointerType(
3878                           Data.ReductionCopies[Cnt]->getType()),
3879                       Data.ReductionCopies[Cnt]->getExprLoc()),
3880                   Replacement.getAlignment());
3881       Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3882       Scope.addPrivate(RedCG.getBaseDecl(Cnt),
3883                        [Replacement]() { return Replacement; });
3884     }
3885   }
3886   // Privatize all private variables except for in_reduction items.
3887   (void)Scope.Privatize();
3888   SmallVector<const Expr *, 4> InRedVars;
3889   SmallVector<const Expr *, 4> InRedPrivs;
3890   SmallVector<const Expr *, 4> InRedOps;
3891   SmallVector<const Expr *, 4> TaskgroupDescriptors;
3892   for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
3893     auto IPriv = C->privates().begin();
3894     auto IRed = C->reduction_ops().begin();
3895     auto ITD = C->taskgroup_descriptors().begin();
3896     for (const Expr *Ref : C->varlists()) {
3897       InRedVars.emplace_back(Ref);
3898       InRedPrivs.emplace_back(*IPriv);
3899       InRedOps.emplace_back(*IRed);
3900       TaskgroupDescriptors.emplace_back(*ITD);
3901       std::advance(IPriv, 1);
3902       std::advance(IRed, 1);
3903       std::advance(ITD, 1);
3904     }
3905   }
3906   // Privatize in_reduction items here, because taskgroup descriptors must be
3907   // privatized earlier.
3908   OMPPrivateScope InRedScope(CGF);
3909   if (!InRedVars.empty()) {
3910     ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
3911     for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
3912       RedCG.emitSharedOrigLValue(CGF, Cnt);
3913       RedCG.emitAggregateType(CGF, Cnt);
3914       // The taskgroup descriptor variable is always implicit firstprivate and
3915       // privatized already during processing of the firstprivates.
3916       // FIXME: This must be removed once the runtime library is fixed.
3917       // Emit required threadprivate variables for
3918       // initializer/combiner/finalizer.
3919 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 3920 RedCG, Cnt); 3921 llvm::Value *ReductionsPtr; 3922 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) { 3923 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), 3924 TRExpr->getExprLoc()); 3925 } else { 3926 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); 3927 } 3928 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 3929 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 3930 Replacement = Address( 3931 CGF.EmitScalarConversion( 3932 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 3933 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 3934 InRedPrivs[Cnt]->getExprLoc()), 3935 Replacement.getAlignment()); 3936 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 3937 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), 3938 [Replacement]() { return Replacement; }); 3939 } 3940 } 3941 (void)InRedScope.Privatize(); 3942 3943 Action.Enter(CGF); 3944 BodyGen(CGF); 3945 }; 3946 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3947 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 3948 Data.NumberOfParts); 3949 OMPLexicalScope Scope(*this, S, llvm::None, 3950 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3951 !isOpenMPSimdDirective(S.getDirectiveKind())); 3952 TaskGen(*this, OutlinedFn, Data); 3953 } 3954 3955 static ImplicitParamDecl * 3956 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 3957 QualType Ty, CapturedDecl *CD, 3958 SourceLocation Loc) { 3959 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3960 ImplicitParamDecl::Other); 3961 auto *OrigRef = DeclRefExpr::Create( 3962 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 3963 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3964 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3965 ImplicitParamDecl::Other); 3966 auto *PrivateRef = DeclRefExpr::Create( 3967 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 3968 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3969 QualType ElemType = C.getBaseElementType(Ty); 3970 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 3971 ImplicitParamDecl::Other); 3972 auto *InitRef = DeclRefExpr::Create( 3973 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 3974 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 3975 PrivateVD->setInitStyle(VarDecl::CInit); 3976 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 3977 InitRef, /*BasePath=*/nullptr, 3978 VK_RValue)); 3979 Data.FirstprivateVars.emplace_back(OrigRef); 3980 Data.FirstprivateCopies.emplace_back(PrivateRef); 3981 Data.FirstprivateInits.emplace_back(InitRef); 3982 return OrigVD; 3983 } 3984 3985 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 3986 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 3987 OMPTargetDataInfo &InputInfo) { 3988 // Emit outlined function for task construct. 3989 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3990 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3991 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 3992 auto I = CS->getCapturedDecl()->param_begin(); 3993 auto PartId = std::next(I); 3994 auto TaskT = std::next(I, 4); 3995 OMPTaskDataTy Data; 3996 // The task is not final. 
3997 Data.Final.setInt(/*IntVal=*/false); 3998 // Get list of firstprivate variables. 3999 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4000 auto IRef = C->varlist_begin(); 4001 auto IElemInitRef = C->inits().begin(); 4002 for (auto *IInit : C->private_copies()) { 4003 Data.FirstprivateVars.push_back(*IRef); 4004 Data.FirstprivateCopies.push_back(IInit); 4005 Data.FirstprivateInits.push_back(*IElemInitRef); 4006 ++IRef; 4007 ++IElemInitRef; 4008 } 4009 } 4010 OMPPrivateScope TargetScope(*this); 4011 VarDecl *BPVD = nullptr; 4012 VarDecl *PVD = nullptr; 4013 VarDecl *SVD = nullptr; 4014 if (InputInfo.NumberOfTargetItems > 0) { 4015 auto *CD = CapturedDecl::Create( 4016 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4017 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4018 QualType BaseAndPointersType = getContext().getConstantArrayType( 4019 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4020 /*IndexTypeQuals=*/0); 4021 BPVD = createImplicitFirstprivateForType( 4022 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 4023 PVD = createImplicitFirstprivateForType( 4024 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 4025 QualType SizesType = getContext().getConstantArrayType( 4026 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4027 ArrSize, nullptr, ArrayType::Normal, 4028 /*IndexTypeQuals=*/0); 4029 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4030 S.getBeginLoc()); 4031 TargetScope.addPrivate( 4032 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 4033 TargetScope.addPrivate(PVD, 4034 [&InputInfo]() { return InputInfo.PointersArray; }); 4035 TargetScope.addPrivate(SVD, 4036 [&InputInfo]() { return InputInfo.SizesArray; }); 4037 } 4038 (void)TargetScope.Privatize(); 4039 // Build list of dependences. 4040 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4041 OMPTaskDataTy::DependData &DD = 4042 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4043 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4044 } 4045 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, 4046 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4047 // Set proper addresses for generated private copies. 4048 OMPPrivateScope Scope(CGF); 4049 if (!Data.FirstprivateVars.empty()) { 4050 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 4051 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 4052 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4053 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4054 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4055 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4056 CS->getCapturedDecl()->getParam(PrivatesParam))); 4057 // Map privates. 
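    // Protocol of the generated copy helper, as used here: the first call
    // argument is the privates pointer taken from the captured parameters
    // above; each subsequent argument is an out-parameter that receives the
    // address of one firstprivate copy inside the task's private data.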
4058 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4059 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4060 CallArgs.push_back(PrivatesPtr); 4061 for (const Expr *E : Data.FirstprivateVars) { 4062 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4063 Address PrivatePtr = 4064 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4065 ".firstpriv.ptr.addr"); 4066 PrivatePtrs.emplace_back(VD, PrivatePtr); 4067 CallArgs.push_back(PrivatePtr.getPointer()); 4068 } 4069 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4070 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4071 for (const auto &Pair : PrivatePtrs) { 4072 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4073 CGF.getContext().getDeclAlign(Pair.first)); 4074 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4075 } 4076 } 4077 // Privatize all private variables except for in_reduction items. 4078 (void)Scope.Privatize(); 4079 if (InputInfo.NumberOfTargetItems > 0) { 4080 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4081 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4082 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4083 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4084 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4085 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4086 } 4087 4088 Action.Enter(CGF); 4089 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4090 BodyGen(CGF); 4091 }; 4092 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4093 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4094 Data.NumberOfParts); 4095 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4096 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4097 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4098 SourceLocation()); 4099 4100 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4101 SharedsTy, CapturedStruct, &IfCond, Data); 4102 } 4103 4104 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4105 // Emit outlined function for task construct. 4106 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4107 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4108 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4109 const Expr *IfCond = nullptr; 4110 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4111 if (C->getNameModifier() == OMPD_unknown || 4112 C->getNameModifier() == OMPD_task) { 4113 IfCond = C->getCondition(); 4114 break; 4115 } 4116 } 4117 4118 OMPTaskDataTy Data; 4119 // Check if we should emit tied or untied task. 
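  // A task is tied unless 'untied' is present; e.g. (sketch)
  // '#pragma omp task untied' results in Data.Tied == false.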
4120 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4121 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4122 CGF.EmitStmt(CS->getCapturedStmt()); 4123 }; 4124 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4125 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4126 const OMPTaskDataTy &Data) { 4127 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4128 SharedsTy, CapturedStruct, IfCond, 4129 Data); 4130 }; 4131 auto LPCRegion = 4132 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4133 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4134 } 4135 4136 void CodeGenFunction::EmitOMPTaskyieldDirective( 4137 const OMPTaskyieldDirective &S) { 4138 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4139 } 4140 4141 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4142 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4143 } 4144 4145 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4146 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 4147 } 4148 4149 void CodeGenFunction::EmitOMPTaskgroupDirective( 4150 const OMPTaskgroupDirective &S) { 4151 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4152 Action.Enter(CGF); 4153 if (const Expr *E = S.getReductionRef()) { 4154 SmallVector<const Expr *, 4> LHSs; 4155 SmallVector<const Expr *, 4> RHSs; 4156 OMPTaskDataTy Data; 4157 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 4158 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4159 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4160 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4161 Data.ReductionOps.append(C->reduction_ops().begin(), 4162 C->reduction_ops().end()); 4163 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4164 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4165 } 4166 llvm::Value *ReductionDesc = 4167 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 4168 LHSs, RHSs, Data); 4169 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4170 CGF.EmitVarDecl(*VD); 4171 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 4172 /*Volatile=*/false, E->getType()); 4173 } 4174 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4175 }; 4176 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4177 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 4178 } 4179 4180 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 4181 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 4182 ? 
llvm::AtomicOrdering::NotAtomic 4183 : llvm::AtomicOrdering::AcquireRelease; 4184 CGM.getOpenMPRuntime().emitFlush( 4185 *this, 4186 [&S]() -> ArrayRef<const Expr *> { 4187 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 4188 return llvm::makeArrayRef(FlushClause->varlist_begin(), 4189 FlushClause->varlist_end()); 4190 return llvm::None; 4191 }(), 4192 S.getBeginLoc(), AO); 4193 } 4194 4195 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 4196 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 4197 LValue DOLVal = EmitLValue(DO->getDepobj()); 4198 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 4199 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 4200 DC->getModifier()); 4201 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 4202 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 4203 *this, Dependencies, DC->getBeginLoc()); 4204 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 4205 return; 4206 } 4207 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 4208 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 4209 return; 4210 } 4211 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 4212 CGM.getOpenMPRuntime().emitUpdateClause( 4213 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 4214 return; 4215 } 4216 } 4217 4218 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 4219 if (!OMPParentLoopDirectiveForScan) 4220 return; 4221 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 4222 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 4223 SmallVector<const Expr *, 4> Shareds; 4224 SmallVector<const Expr *, 4> Privates; 4225 SmallVector<const Expr *, 4> LHSs; 4226 SmallVector<const Expr *, 4> RHSs; 4227 SmallVector<const Expr *, 4> ReductionOps; 4228 SmallVector<const Expr *, 4> CopyOps; 4229 SmallVector<const Expr *, 4> CopyArrayTemps; 4230 SmallVector<const Expr *, 4> CopyArrayElems; 4231 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 4232 if (C->getModifier() != OMPC_REDUCTION_inscan) 4233 continue; 4234 Shareds.append(C->varlist_begin(), C->varlist_end()); 4235 Privates.append(C->privates().begin(), C->privates().end()); 4236 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4237 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4238 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 4239 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 4240 CopyArrayTemps.append(C->copy_array_temps().begin(), 4241 C->copy_array_temps().end()); 4242 CopyArrayElems.append(C->copy_array_elems().begin(), 4243 C->copy_array_elems().end()); 4244 } 4245 if (ParentDir.getDirectiveKind() == OMPD_simd || 4246 (getLangOpts().OpenMPSimd && 4247 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 4248 // For simd directive and simd-based directives in simd only mode, use the 4249 // following codegen: 4250 // int x = 0; 4251 // #pragma omp simd reduction(inscan, +: x) 4252 // for (..) { 4253 // <first part> 4254 // #pragma omp scan inclusive(x) 4255 // <second part> 4256 // } 4257 // is transformed to: 4258 // int x = 0; 4259 // for (..) { 4260 // int x_priv = 0; 4261 // <first part> 4262 // x = x_priv + x; 4263 // x_priv = x; 4264 // <second part> 4265 // } 4266 // and 4267 // int x = 0; 4268 // #pragma omp simd reduction(inscan, +: x) 4269 // for (..) 
{ 4270 // <first part> 4271 // #pragma omp scan exclusive(x) 4272 // <second part> 4273 // } 4274 // to 4275 // int x = 0; 4276 // for (..) { 4277 // int x_priv = 0; 4278 // <second part> 4279 // int temp = x; 4280 // x = x_priv + x; 4281 // x_priv = temp; 4282 // <first part> 4283 // } 4284 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 4285 EmitBranch(IsInclusive 4286 ? OMPScanReduce 4287 : BreakContinueStack.back().ContinueBlock.getBlock()); 4288 EmitBlock(OMPScanDispatch); 4289 { 4290 // New scope for correct construction/destruction of temp variables for 4291 // exclusive scan. 4292 LexicalScope Scope(*this, S.getSourceRange()); 4293 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 4294 EmitBlock(OMPScanReduce); 4295 if (!IsInclusive) { 4296 // Create temp var and copy LHS value to this temp value. 4297 // TMP = LHS; 4298 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4299 const Expr *PrivateExpr = Privates[I]; 4300 const Expr *TempExpr = CopyArrayTemps[I]; 4301 EmitAutoVarDecl( 4302 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 4303 LValue DestLVal = EmitLValue(TempExpr); 4304 LValue SrcLVal = EmitLValue(LHSs[I]); 4305 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4306 SrcLVal.getAddress(*this), 4307 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4308 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4309 CopyOps[I]); 4310 } 4311 } 4312 CGM.getOpenMPRuntime().emitReduction( 4313 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 4314 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 4315 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4316 const Expr *PrivateExpr = Privates[I]; 4317 LValue DestLVal; 4318 LValue SrcLVal; 4319 if (IsInclusive) { 4320 DestLVal = EmitLValue(RHSs[I]); 4321 SrcLVal = EmitLValue(LHSs[I]); 4322 } else { 4323 const Expr *TempExpr = CopyArrayTemps[I]; 4324 DestLVal = EmitLValue(RHSs[I]); 4325 SrcLVal = EmitLValue(TempExpr); 4326 } 4327 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4328 SrcLVal.getAddress(*this), 4329 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4330 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4331 CopyOps[I]); 4332 } 4333 } 4334 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 4335 OMPScanExitBlock = IsInclusive 4336 ? BreakContinueStack.back().ContinueBlock.getBlock() 4337 : OMPScanReduce; 4338 EmitBlock(OMPAfterScanBlock); 4339 return; 4340 } 4341 if (!IsInclusive) { 4342 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4343 EmitBlock(OMPScanExitBlock); 4344 } 4345 if (OMPFirstScanLoop) { 4346 // Emit buffer[i] = red; at the end of the input phase. 
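  // Sketch of the assumed two-pass lowering for worksharing loops with an
  // inscan reduction: the loop body is emitted twice; in the first (input)
  // pass each iteration conceptually performs
  //   buffer[i] = red;
  // a cross-iteration prefix combine over 'buffer' happens between the two
  // passes, and the second (scan) pass restores 'red = buffer[i]' on entry
  // (see the '!OMPFirstScanLoop' handling further down).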
4347 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4348 .getIterationVariable() 4349 ->IgnoreParenImpCasts(); 4350 LValue IdxLVal = EmitLValue(IVExpr); 4351 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4352 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4353 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4354 const Expr *PrivateExpr = Privates[I]; 4355 const Expr *OrigExpr = Shareds[I]; 4356 const Expr *CopyArrayElem = CopyArrayElems[I]; 4357 OpaqueValueMapping IdxMapping( 4358 *this, 4359 cast<OpaqueValueExpr>( 4360 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4361 RValue::get(IdxVal)); 4362 LValue DestLVal = EmitLValue(CopyArrayElem); 4363 LValue SrcLVal = EmitLValue(OrigExpr); 4364 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4365 SrcLVal.getAddress(*this), 4366 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4367 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4368 CopyOps[I]); 4369 } 4370 } 4371 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4372 if (IsInclusive) { 4373 EmitBlock(OMPScanExitBlock); 4374 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4375 } 4376 EmitBlock(OMPScanDispatch); 4377 if (!OMPFirstScanLoop) { 4378 // Emit red = buffer[i]; at the entrance to the scan phase. 4379 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4380 .getIterationVariable() 4381 ->IgnoreParenImpCasts(); 4382 LValue IdxLVal = EmitLValue(IVExpr); 4383 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4384 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4385 llvm::BasicBlock *ExclusiveExitBB = nullptr; 4386 if (!IsInclusive) { 4387 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 4388 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 4389 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 4390 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 4391 EmitBlock(ContBB); 4392 // Use idx - 1 iteration for exclusive scan. 4393 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 4394 } 4395 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4396 const Expr *PrivateExpr = Privates[I]; 4397 const Expr *OrigExpr = Shareds[I]; 4398 const Expr *CopyArrayElem = CopyArrayElems[I]; 4399 OpaqueValueMapping IdxMapping( 4400 *this, 4401 cast<OpaqueValueExpr>( 4402 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4403 RValue::get(IdxVal)); 4404 LValue SrcLVal = EmitLValue(CopyArrayElem); 4405 LValue DestLVal = EmitLValue(OrigExpr); 4406 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4407 SrcLVal.getAddress(*this), 4408 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4409 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4410 CopyOps[I]); 4411 } 4412 if (!IsInclusive) { 4413 EmitBlock(ExclusiveExitBB); 4414 } 4415 } 4416 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 4417 : OMPAfterScanBlock); 4418 EmitBlock(OMPAfterScanBlock); 4419 } 4420 4421 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 4422 const CodeGenLoopTy &CodeGenLoop, 4423 Expr *IncExpr) { 4424 // Emit the loop iteration variable. 4425 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 4426 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 4427 EmitVarDecl(*IVDecl); 4428 4429 // Emit the iterations count variable. 
4430 // If it is not a variable, Sema decided to calculate iterations count on each 4431 // iteration (e.g., it is foldable into a constant). 4432 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 4433 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 4434 // Emit calculation of the iterations count. 4435 EmitIgnoredExpr(S.getCalcLastIteration()); 4436 } 4437 4438 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 4439 4440 bool HasLastprivateClause = false; 4441 // Check pre-condition. 4442 { 4443 OMPLoopScope PreInitScope(*this, S); 4444 // Skip the entire loop if we don't meet the precondition. 4445 // If the condition constant folds and can be elided, avoid emitting the 4446 // whole loop. 4447 bool CondConstant; 4448 llvm::BasicBlock *ContBlock = nullptr; 4449 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 4450 if (!CondConstant) 4451 return; 4452 } else { 4453 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 4454 ContBlock = createBasicBlock("omp.precond.end"); 4455 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 4456 getProfileCount(&S)); 4457 EmitBlock(ThenBlock); 4458 incrementProfileCounter(&S); 4459 } 4460 4461 emitAlignedClause(*this, S); 4462 // Emit 'then' code. 4463 { 4464 // Emit helper vars inits. 4465 4466 LValue LB = EmitOMPHelperVar( 4467 *this, cast<DeclRefExpr>( 4468 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4469 ? S.getCombinedLowerBoundVariable() 4470 : S.getLowerBoundVariable()))); 4471 LValue UB = EmitOMPHelperVar( 4472 *this, cast<DeclRefExpr>( 4473 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4474 ? S.getCombinedUpperBoundVariable() 4475 : S.getUpperBoundVariable()))); 4476 LValue ST = 4477 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 4478 LValue IL = 4479 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 4480 4481 OMPPrivateScope LoopScope(*this); 4482 if (EmitOMPFirstprivateClause(S, LoopScope)) { 4483 // Emit implicit barrier to synchronize threads and avoid data races 4484 // on initialization of firstprivate variables and post-update of 4485 // lastprivate variables. 4486 CGM.getOpenMPRuntime().emitBarrierCall( 4487 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 4488 /*ForceSimpleCall=*/true); 4489 } 4490 EmitOMPPrivateClause(S, LoopScope); 4491 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 4492 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4493 !isOpenMPTeamsDirective(S.getDirectiveKind())) 4494 EmitOMPReductionClauseInit(S, LoopScope); 4495 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 4496 EmitOMPPrivateLoopCounters(S, LoopScope); 4497 (void)LoopScope.Privatize(); 4498 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4499 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 4500 4501 // Detect the distribute schedule kind and chunk. 4502 llvm::Value *Chunk = nullptr; 4503 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 4504 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 4505 ScheduleKind = C->getDistScheduleKind(); 4506 if (const Expr *Ch = C->getChunkSize()) { 4507 Chunk = EmitScalarExpr(Ch); 4508 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 4509 S.getIterationVariable()->getType(), 4510 S.getBeginLoc()); 4511 } 4512 } else { 4513 // Default behaviour for dist_schedule clause. 
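  // (Illustrative description: with no dist_schedule clause the default is
  // queried from the OpenMP runtime class; on the host this effectively
  // behaves like 'dist_schedule(static)' with no chunk, i.e. roughly one
  // equal-sized chunk per team, while offloading runtimes may choose a
  // device-specific chunking.)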
4514 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 4515 *this, S, ScheduleKind, Chunk); 4516 } 4517 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 4518 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 4519 4520 // OpenMP [2.10.8, distribute Construct, Description] 4521 // If dist_schedule is specified, kind must be static. If specified, 4522 // iterations are divided into chunks of size chunk_size, chunks are 4523 // assigned to the teams of the league in a round-robin fashion in the 4524 // order of the team number. When no chunk_size is specified, the 4525 // iteration space is divided into chunks that are approximately equal 4526 // in size, and at most one chunk is distributed to each team of the 4527 // league. The size of the chunks is unspecified in this case. 4528 bool StaticChunked = RT.isStaticChunked( 4529 ScheduleKind, /* Chunked */ Chunk != nullptr) && 4530 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 4531 if (RT.isStaticNonchunked(ScheduleKind, 4532 /* Chunked */ Chunk != nullptr) || 4533 StaticChunked) { 4534 CGOpenMPRuntime::StaticRTInput StaticInit( 4535 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 4536 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 4537 StaticChunked ? Chunk : nullptr); 4538 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 4539 StaticInit); 4540 JumpDest LoopExit = 4541 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 4542 // UB = min(UB, GlobalUB); 4543 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4544 ? S.getCombinedEnsureUpperBound() 4545 : S.getEnsureUpperBound()); 4546 // IV = LB; 4547 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4548 ? S.getCombinedInit() 4549 : S.getInit()); 4550 4551 const Expr *Cond = 4552 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4553 ? S.getCombinedCond() 4554 : S.getCond(); 4555 4556 if (StaticChunked) 4557 Cond = S.getCombinedDistCond(); 4558 4559 // For static unchunked schedules generate: 4560 // 4561 // 1. For distribute alone, codegen 4562 // while (idx <= UB) { 4563 // BODY; 4564 // ++idx; 4565 // } 4566 // 4567 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 4568 // while (idx <= UB) { 4569 // <CodeGen rest of pragma>(LB, UB); 4570 // idx += ST; 4571 // } 4572 // 4573 // For static chunk one schedule generate: 4574 // 4575 // while (IV <= GlobalUB) { 4576 // <CodeGen rest of pragma>(LB, UB); 4577 // LB += ST; 4578 // UB += ST; 4579 // UB = min(UB, GlobalUB); 4580 // IV = LB; 4581 // } 4582 // 4583 emitCommonSimdLoop( 4584 *this, S, 4585 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4586 if (isOpenMPSimdDirective(S.getDirectiveKind())) 4587 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 4588 }, 4589 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 4590 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 4591 CGF.EmitOMPInnerLoop( 4592 S, LoopScope.requiresCleanups(), Cond, IncExpr, 4593 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 4594 CodeGenLoop(CGF, S, LoopExit); 4595 }, 4596 [&S, StaticChunked](CodeGenFunction &CGF) { 4597 if (StaticChunked) { 4598 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 4599 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 4600 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 4601 CGF.EmitIgnoredExpr(S.getCombinedInit()); 4602 } 4603 }); 4604 }); 4605 EmitBlock(LoopExit.getBlock()); 4606 // Tell the runtime we are done. 
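  // (With the usual libomp entry points this pairs the *_static_init call
  // emitted above with a finalization call such as __kmpc_for_static_fini;
  // the exact runtime symbol is chosen by emitForStaticFinish and is named
  // here for illustration only.)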
4607 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 4608 } else { 4609 // Emit the outer loop, which requests its work chunk [LB..UB] from 4610 // runtime and runs the inner loop to process it. 4611 const OMPLoopArguments LoopArguments = { 4612 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 4613 IL.getAddress(*this), Chunk}; 4614 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 4615 CodeGenLoop); 4616 } 4617 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 4618 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 4619 return CGF.Builder.CreateIsNotNull( 4620 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 4621 }); 4622 } 4623 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 4624 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4625 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 4626 EmitOMPReductionClauseFinal(S, OMPD_simd); 4627 // Emit post-update of the reduction variables if IsLastIter != 0. 4628 emitPostUpdateForReductionClause( 4629 *this, S, [IL, &S](CodeGenFunction &CGF) { 4630 return CGF.Builder.CreateIsNotNull( 4631 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 4632 }); 4633 } 4634 // Emit final copy of the lastprivate variables if IsLastIter != 0. 4635 if (HasLastprivateClause) { 4636 EmitOMPLastprivateClauseFinal( 4637 S, /*NoFinals=*/false, 4638 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 4639 } 4640 } 4641 4642 // We're now done with the loop, so jump to the continuation block. 4643 if (ContBlock) { 4644 EmitBranch(ContBlock); 4645 EmitBlock(ContBlock, true); 4646 } 4647 } 4648 } 4649 4650 void CodeGenFunction::EmitOMPDistributeDirective( 4651 const OMPDistributeDirective &S) { 4652 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4653 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4654 }; 4655 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4656 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 4657 } 4658 4659 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 4660 const CapturedStmt *S, 4661 SourceLocation Loc) { 4662 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 4663 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 4664 CGF.CapturedStmtInfo = &CapStmtInfo; 4665 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 4666 Fn->setDoesNotRecurse(); 4667 return Fn; 4668 } 4669 4670 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 4671 if (S.hasClausesOfKind<OMPDependClause>()) { 4672 assert(!S.getAssociatedStmt() && 4673 "No associated statement must be in ordered depend construct."); 4674 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 4675 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 4676 return; 4677 } 4678 const auto *C = S.getSingleClause<OMPSIMDClause>(); 4679 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 4680 PrePostActionTy &Action) { 4681 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 4682 if (C) { 4683 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4684 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4685 llvm::Function *OutlinedFn = 4686 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 4687 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 4688 OutlinedFn, CapturedVars); 4689 } else { 4690 Action.Enter(CGF); 4691 CGF.EmitStmt(CS->getCapturedStmt()); 4692 } 4693 }; 4694 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4695 CGM.getOpenMPRuntime().emitOrderedRegion(*this, 
CodeGen, S.getBeginLoc(), !C); 4696 } 4697 4698 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 4699 QualType SrcType, QualType DestType, 4700 SourceLocation Loc) { 4701 assert(CGF.hasScalarEvaluationKind(DestType) && 4702 "DestType must have scalar evaluation kind."); 4703 assert(!Val.isAggregate() && "Must be a scalar or complex."); 4704 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 4705 DestType, Loc) 4706 : CGF.EmitComplexToScalarConversion( 4707 Val.getComplexVal(), SrcType, DestType, Loc); 4708 } 4709 4710 static CodeGenFunction::ComplexPairTy 4711 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 4712 QualType DestType, SourceLocation Loc) { 4713 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 4714 "DestType must have complex evaluation kind."); 4715 CodeGenFunction::ComplexPairTy ComplexVal; 4716 if (Val.isScalar()) { 4717 // Convert the input element to the element type of the complex. 4718 QualType DestElementType = 4719 DestType->castAs<ComplexType>()->getElementType(); 4720 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 4721 Val.getScalarVal(), SrcType, DestElementType, Loc); 4722 ComplexVal = CodeGenFunction::ComplexPairTy( 4723 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 4724 } else { 4725 assert(Val.isComplex() && "Must be a scalar or complex."); 4726 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 4727 QualType DestElementType = 4728 DestType->castAs<ComplexType>()->getElementType(); 4729 ComplexVal.first = CGF.EmitScalarConversion( 4730 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 4731 ComplexVal.second = CGF.EmitScalarConversion( 4732 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 4733 } 4734 return ComplexVal; 4735 } 4736 4737 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4738 LValue LVal, RValue RVal) { 4739 if (LVal.isGlobalReg()) 4740 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 4741 else 4742 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 4743 } 4744 4745 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 4746 llvm::AtomicOrdering AO, LValue LVal, 4747 SourceLocation Loc) { 4748 if (LVal.isGlobalReg()) 4749 return CGF.EmitLoadOfLValue(LVal, Loc); 4750 return CGF.EmitAtomicLoad( 4751 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 4752 LVal.isVolatile()); 4753 } 4754 4755 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 4756 QualType RValTy, SourceLocation Loc) { 4757 switch (getEvaluationKind(LVal.getType())) { 4758 case TEK_Scalar: 4759 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 4760 *this, RVal, RValTy, LVal.getType(), Loc)), 4761 LVal); 4762 break; 4763 case TEK_Complex: 4764 EmitStoreOfComplex( 4765 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 4766 /*isInit=*/false); 4767 break; 4768 case TEK_Aggregate: 4769 llvm_unreachable("Must be a scalar or complex."); 4770 } 4771 } 4772 4773 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4774 const Expr *X, const Expr *V, 4775 SourceLocation Loc) { 4776 // v = x; 4777 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 4778 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 4779 LValue XLValue = CGF.EmitLValue(X); 4780 LValue VLValue = CGF.EmitLValue(V); 4781 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 4782 // OpenMP, 
2.17.7, atomic Construct 4783 // If the read or capture clause is specified and the acquire, acq_rel, or 4784 // seq_cst clause is specified then the strong flush on exit from the atomic 4785 // operation is also an acquire flush. 4786 switch (AO) { 4787 case llvm::AtomicOrdering::Acquire: 4788 case llvm::AtomicOrdering::AcquireRelease: 4789 case llvm::AtomicOrdering::SequentiallyConsistent: 4790 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4791 llvm::AtomicOrdering::Acquire); 4792 break; 4793 case llvm::AtomicOrdering::Monotonic: 4794 case llvm::AtomicOrdering::Release: 4795 break; 4796 case llvm::AtomicOrdering::NotAtomic: 4797 case llvm::AtomicOrdering::Unordered: 4798 llvm_unreachable("Unexpected ordering."); 4799 } 4800 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 4801 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4802 } 4803 4804 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, 4805 llvm::AtomicOrdering AO, const Expr *X, 4806 const Expr *E, SourceLocation Loc) { 4807 // x = expr; 4808 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 4809 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 4810 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4811 // OpenMP, 2.17.7, atomic Construct 4812 // If the write, update, or capture clause is specified and the release, 4813 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4814 // the atomic operation is also a release flush. 4815 switch (AO) { 4816 case llvm::AtomicOrdering::Release: 4817 case llvm::AtomicOrdering::AcquireRelease: 4818 case llvm::AtomicOrdering::SequentiallyConsistent: 4819 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4820 llvm::AtomicOrdering::Release); 4821 break; 4822 case llvm::AtomicOrdering::Acquire: 4823 case llvm::AtomicOrdering::Monotonic: 4824 break; 4825 case llvm::AtomicOrdering::NotAtomic: 4826 case llvm::AtomicOrdering::Unordered: 4827 llvm_unreachable("Unexpected ordering."); 4828 } 4829 } 4830 4831 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 4832 RValue Update, 4833 BinaryOperatorKind BO, 4834 llvm::AtomicOrdering AO, 4835 bool IsXLHSInRHSPart) { 4836 ASTContext &Context = CGF.getContext(); 4837 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 4838 // expression is simple and atomic is allowed for the given type for the 4839 // target platform. 
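  // Illustrative examples of the distinction made below, assuming 'int x;'
  // and 'float f;':
  //   #pragma omp atomic update
  //   x += 1;      // integer, simple lvalue -> a single atomicrmw suffices
  //   #pragma omp atomic update
  //   f += 1.0f;   // non-integer update -> handled by the compare-and-swap
  //                // fallback in EmitOMPAtomicSimpleUpdateExpr instead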
4840 if (BO == BO_Comma || !Update.isScalar() || 4841 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() || 4842 (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 4843 (Update.getScalarVal()->getType() != 4844 X.getAddress(CGF).getElementType())) || 4845 !X.getAddress(CGF).getElementType()->isIntegerTy() || 4846 !Context.getTargetInfo().hasBuiltinAtomic( 4847 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 4848 return std::make_pair(false, RValue::get(nullptr)); 4849 4850 llvm::AtomicRMWInst::BinOp RMWOp; 4851 switch (BO) { 4852 case BO_Add: 4853 RMWOp = llvm::AtomicRMWInst::Add; 4854 break; 4855 case BO_Sub: 4856 if (!IsXLHSInRHSPart) 4857 return std::make_pair(false, RValue::get(nullptr)); 4858 RMWOp = llvm::AtomicRMWInst::Sub; 4859 break; 4860 case BO_And: 4861 RMWOp = llvm::AtomicRMWInst::And; 4862 break; 4863 case BO_Or: 4864 RMWOp = llvm::AtomicRMWInst::Or; 4865 break; 4866 case BO_Xor: 4867 RMWOp = llvm::AtomicRMWInst::Xor; 4868 break; 4869 case BO_LT: 4870 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4871 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 4872 : llvm::AtomicRMWInst::Max) 4873 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 4874 : llvm::AtomicRMWInst::UMax); 4875 break; 4876 case BO_GT: 4877 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4878 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 4879 : llvm::AtomicRMWInst::Min) 4880 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax 4881 : llvm::AtomicRMWInst::UMin); 4882 break; 4883 case BO_Assign: 4884 RMWOp = llvm::AtomicRMWInst::Xchg; 4885 break; 4886 case BO_Mul: 4887 case BO_Div: 4888 case BO_Rem: 4889 case BO_Shl: 4890 case BO_Shr: 4891 case BO_LAnd: 4892 case BO_LOr: 4893 return std::make_pair(false, RValue::get(nullptr)); 4894 case BO_PtrMemD: 4895 case BO_PtrMemI: 4896 case BO_LE: 4897 case BO_GE: 4898 case BO_EQ: 4899 case BO_NE: 4900 case BO_Cmp: 4901 case BO_AddAssign: 4902 case BO_SubAssign: 4903 case BO_AndAssign: 4904 case BO_OrAssign: 4905 case BO_XorAssign: 4906 case BO_MulAssign: 4907 case BO_DivAssign: 4908 case BO_RemAssign: 4909 case BO_ShlAssign: 4910 case BO_ShrAssign: 4911 case BO_Comma: 4912 llvm_unreachable("Unsupported atomic update operation"); 4913 } 4914 llvm::Value *UpdateVal = Update.getScalarVal(); 4915 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 4916 UpdateVal = CGF.Builder.CreateIntCast( 4917 IC, X.getAddress(CGF).getElementType(), 4918 X.getType()->hasSignedIntegerRepresentation()); 4919 } 4920 llvm::Value *Res = 4921 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 4922 return std::make_pair(true, RValue::get(Res)); 4923 } 4924 4925 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 4926 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 4927 llvm::AtomicOrdering AO, SourceLocation Loc, 4928 const llvm::function_ref<RValue(RValue)> CommonGen) { 4929 // Update expressions are allowed to have the following forms: 4930 // x binop= expr; -> xrval + expr; 4931 // x++, ++x -> xrval + 1; 4932 // x--, --x -> xrval - 1; 4933 // x = x binop expr; -> xrval binop expr 4934 // x = expr Op x; - > expr binop xrval; 4935 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 4936 if (!Res.first) { 4937 if (X.isGlobalReg()) { 4938 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 4939 // 'xrval'. 4940 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 4941 } else { 4942 // Perform compare-and-swap procedure. 
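  // (EmitAtomicUpdate is expected to lower this to an atomic load plus a
  // cmpxchg retry loop, or to a runtime library call for types that are too
  // large; it is the generic fallback when no single atomicrmw applies.)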
4943 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 4944 } 4945 } 4946 return Res; 4947 } 4948 4949 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, 4950 llvm::AtomicOrdering AO, const Expr *X, 4951 const Expr *E, const Expr *UE, 4952 bool IsXLHSInRHSPart, SourceLocation Loc) { 4953 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4954 "Update expr in 'atomic update' must be a binary operator."); 4955 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4956 // Update expressions are allowed to have the following forms: 4957 // x binop= expr; -> xrval + expr; 4958 // x++, ++x -> xrval + 1; 4959 // x--, --x -> xrval - 1; 4960 // x = x binop expr; -> xrval binop expr 4961 // x = expr Op x; - > expr binop xrval; 4962 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 4963 LValue XLValue = CGF.EmitLValue(X); 4964 RValue ExprRValue = CGF.EmitAnyExpr(E); 4965 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4966 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4967 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4968 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 4969 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 4970 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4971 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4972 return CGF.EmitAnyExpr(UE); 4973 }; 4974 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 4975 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4976 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4977 // OpenMP, 2.17.7, atomic Construct 4978 // If the write, update, or capture clause is specified and the release, 4979 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4980 // the atomic operation is also a release flush. 
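  // For example '#pragma omp atomic update seq_cst' (or an explicit release
  // ordering) makes the switch below emit a release flush through the
  // runtime, while acquire and relaxed orderings add nothing here.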
4981 switch (AO) { 4982 case llvm::AtomicOrdering::Release: 4983 case llvm::AtomicOrdering::AcquireRelease: 4984 case llvm::AtomicOrdering::SequentiallyConsistent: 4985 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4986 llvm::AtomicOrdering::Release); 4987 break; 4988 case llvm::AtomicOrdering::Acquire: 4989 case llvm::AtomicOrdering::Monotonic: 4990 break; 4991 case llvm::AtomicOrdering::NotAtomic: 4992 case llvm::AtomicOrdering::Unordered: 4993 llvm_unreachable("Unexpected ordering."); 4994 } 4995 } 4996 4997 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 4998 QualType SourceType, QualType ResType, 4999 SourceLocation Loc) { 5000 switch (CGF.getEvaluationKind(ResType)) { 5001 case TEK_Scalar: 5002 return RValue::get( 5003 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 5004 case TEK_Complex: { 5005 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 5006 return RValue::getComplex(Res.first, Res.second); 5007 } 5008 case TEK_Aggregate: 5009 break; 5010 } 5011 llvm_unreachable("Must be a scalar or complex."); 5012 } 5013 5014 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, 5015 llvm::AtomicOrdering AO, 5016 bool IsPostfixUpdate, const Expr *V, 5017 const Expr *X, const Expr *E, 5018 const Expr *UE, bool IsXLHSInRHSPart, 5019 SourceLocation Loc) { 5020 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 5021 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 5022 RValue NewVVal; 5023 LValue VLValue = CGF.EmitLValue(V); 5024 LValue XLValue = CGF.EmitLValue(X); 5025 RValue ExprRValue = CGF.EmitAnyExpr(E); 5026 QualType NewVValType; 5027 if (UE) { 5028 // 'x' is updated with some additional value. 5029 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5030 "Update expr in 'atomic capture' must be a binary operator."); 5031 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5032 // Update expressions are allowed to have the following forms: 5033 // x binop= expr; -> xrval + expr; 5034 // x++, ++x -> xrval + 1; 5035 // x--, --x -> xrval - 1; 5036 // x = x binop expr; -> xrval binop expr 5037 // x = expr Op x; - > expr binop xrval; 5038 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5039 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5040 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5041 NewVValType = XRValExpr->getType(); 5042 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5043 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 5044 IsPostfixUpdate](RValue XRValue) { 5045 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5046 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5047 RValue Res = CGF.EmitAnyExpr(UE); 5048 NewVVal = IsPostfixUpdate ? XRValue : Res; 5049 return Res; 5050 }; 5051 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5052 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5053 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5054 if (Res.first) { 5055 // 'atomicrmw' instruction was generated. 5056 if (IsPostfixUpdate) { 5057 // Use old value from 'atomicrmw'. 5058 NewVVal = Res.second; 5059 } else { 5060 // 'atomicrmw' does not provide new value, so evaluate it using old 5061 // value of 'x'. 
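  // Illustrative case, assuming 'int v, x;':
  //   #pragma omp atomic capture
  //   v = x += expr;
  // captures the new value of 'x', but atomicrmw only returned the old one,
  // so the value for 'v' is recomputed below by re-evaluating the update
  // expression against that old value.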
5062 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5063 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5064 NewVVal = CGF.EmitAnyExpr(UE); 5065 } 5066 } 5067 } else { 5068 // 'x' is simply rewritten with some 'expr'. 5069 NewVValType = X->getType().getNonReferenceType(); 5070 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5071 X->getType().getNonReferenceType(), Loc); 5072 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5073 NewVVal = XRValue; 5074 return ExprRValue; 5075 }; 5076 // Try to perform atomicrmw xchg, otherwise simple exchange. 5077 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5078 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5079 Loc, Gen); 5080 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5081 if (Res.first) { 5082 // 'atomicrmw' instruction was generated. 5083 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 5084 } 5085 } 5086 // Emit post-update store to 'v' of old/new 'x' value. 5087 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 5088 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5089 // OpenMP, 2.17.7, atomic Construct 5090 // If the write, update, or capture clause is specified and the release, 5091 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5092 // the atomic operation is also a release flush. 5093 // If the read or capture clause is specified and the acquire, acq_rel, or 5094 // seq_cst clause is specified then the strong flush on exit from the atomic 5095 // operation is also an acquire flush. 5096 switch (AO) { 5097 case llvm::AtomicOrdering::Release: 5098 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5099 llvm::AtomicOrdering::Release); 5100 break; 5101 case llvm::AtomicOrdering::Acquire: 5102 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5103 llvm::AtomicOrdering::Acquire); 5104 break; 5105 case llvm::AtomicOrdering::AcquireRelease: 5106 case llvm::AtomicOrdering::SequentiallyConsistent: 5107 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5108 llvm::AtomicOrdering::AcquireRelease); 5109 break; 5110 case llvm::AtomicOrdering::Monotonic: 5111 break; 5112 case llvm::AtomicOrdering::NotAtomic: 5113 case llvm::AtomicOrdering::Unordered: 5114 llvm_unreachable("Unexpected ordering."); 5115 } 5116 } 5117 5118 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 5119 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 5120 const Expr *X, const Expr *V, const Expr *E, 5121 const Expr *UE, bool IsXLHSInRHSPart, 5122 SourceLocation Loc) { 5123 switch (Kind) { 5124 case OMPC_read: 5125 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 5126 break; 5127 case OMPC_write: 5128 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 5129 break; 5130 case OMPC_unknown: 5131 case OMPC_update: 5132 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 5133 break; 5134 case OMPC_capture: 5135 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 5136 IsXLHSInRHSPart, Loc); 5137 break; 5138 case OMPC_if: 5139 case OMPC_final: 5140 case OMPC_num_threads: 5141 case OMPC_private: 5142 case OMPC_firstprivate: 5143 case OMPC_lastprivate: 5144 case OMPC_reduction: 5145 case OMPC_task_reduction: 5146 case OMPC_in_reduction: 5147 case OMPC_safelen: 5148 case OMPC_simdlen: 5149 case OMPC_allocator: 5150 case OMPC_allocate: 5151 case OMPC_collapse: 5152 case OMPC_default: 5153 case OMPC_seq_cst: 5154 case OMPC_acq_rel: 5155 case OMPC_acquire: 5156 
case OMPC_release: 5157 case OMPC_relaxed: 5158 case OMPC_shared: 5159 case OMPC_linear: 5160 case OMPC_aligned: 5161 case OMPC_copyin: 5162 case OMPC_copyprivate: 5163 case OMPC_flush: 5164 case OMPC_depobj: 5165 case OMPC_proc_bind: 5166 case OMPC_schedule: 5167 case OMPC_ordered: 5168 case OMPC_nowait: 5169 case OMPC_untied: 5170 case OMPC_threadprivate: 5171 case OMPC_depend: 5172 case OMPC_mergeable: 5173 case OMPC_device: 5174 case OMPC_threads: 5175 case OMPC_simd: 5176 case OMPC_map: 5177 case OMPC_num_teams: 5178 case OMPC_thread_limit: 5179 case OMPC_priority: 5180 case OMPC_grainsize: 5181 case OMPC_nogroup: 5182 case OMPC_num_tasks: 5183 case OMPC_hint: 5184 case OMPC_dist_schedule: 5185 case OMPC_defaultmap: 5186 case OMPC_uniform: 5187 case OMPC_to: 5188 case OMPC_from: 5189 case OMPC_use_device_ptr: 5190 case OMPC_use_device_addr: 5191 case OMPC_is_device_ptr: 5192 case OMPC_unified_address: 5193 case OMPC_unified_shared_memory: 5194 case OMPC_reverse_offload: 5195 case OMPC_dynamic_allocators: 5196 case OMPC_atomic_default_mem_order: 5197 case OMPC_device_type: 5198 case OMPC_match: 5199 case OMPC_nontemporal: 5200 case OMPC_order: 5201 case OMPC_destroy: 5202 case OMPC_detach: 5203 case OMPC_inclusive: 5204 case OMPC_exclusive: 5205 case OMPC_uses_allocators: 5206 case OMPC_affinity: 5207 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 5208 } 5209 } 5210 5211 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 5212 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic; 5213 bool MemOrderingSpecified = false; 5214 if (S.getSingleClause<OMPSeqCstClause>()) { 5215 AO = llvm::AtomicOrdering::SequentiallyConsistent; 5216 MemOrderingSpecified = true; 5217 } else if (S.getSingleClause<OMPAcqRelClause>()) { 5218 AO = llvm::AtomicOrdering::AcquireRelease; 5219 MemOrderingSpecified = true; 5220 } else if (S.getSingleClause<OMPAcquireClause>()) { 5221 AO = llvm::AtomicOrdering::Acquire; 5222 MemOrderingSpecified = true; 5223 } else if (S.getSingleClause<OMPReleaseClause>()) { 5224 AO = llvm::AtomicOrdering::Release; 5225 MemOrderingSpecified = true; 5226 } else if (S.getSingleClause<OMPRelaxedClause>()) { 5227 AO = llvm::AtomicOrdering::Monotonic; 5228 MemOrderingSpecified = true; 5229 } 5230 OpenMPClauseKind Kind = OMPC_unknown; 5231 for (const OMPClause *C : S.clauses()) { 5232 // Find first clause (skip seq_cst|acq_rel|aqcuire|release|relaxed clause, 5233 // if it is first). 
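  // For example, for '#pragma omp atomic capture seq_cst' the loop below
  // selects 'capture' as the semantic clause and leaves the memory-order
  // clause to the handling above.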
5234 if (C->getClauseKind() != OMPC_seq_cst && 5235 C->getClauseKind() != OMPC_acq_rel && 5236 C->getClauseKind() != OMPC_acquire && 5237 C->getClauseKind() != OMPC_release && 5238 C->getClauseKind() != OMPC_relaxed) { 5239 Kind = C->getClauseKind(); 5240 break; 5241 } 5242 } 5243 if (!MemOrderingSpecified) { 5244 llvm::AtomicOrdering DefaultOrder = 5245 CGM.getOpenMPRuntime().getDefaultMemoryOrdering(); 5246 if (DefaultOrder == llvm::AtomicOrdering::Monotonic || 5247 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent || 5248 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease && 5249 Kind == OMPC_capture)) { 5250 AO = DefaultOrder; 5251 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) { 5252 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) { 5253 AO = llvm::AtomicOrdering::Release; 5254 } else if (Kind == OMPC_read) { 5255 assert(Kind == OMPC_read && "Unexpected atomic kind."); 5256 AO = llvm::AtomicOrdering::Acquire; 5257 } 5258 } 5259 } 5260 5261 const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers(); 5262 5263 auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF, 5264 PrePostActionTy &) { 5265 CGF.EmitStopPoint(CS); 5266 emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(), 5267 S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(), 5268 S.getBeginLoc()); 5269 }; 5270 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5271 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 5272 } 5273 5274 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 5275 const OMPExecutableDirective &S, 5276 const RegionCodeGenTy &CodeGen) { 5277 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 5278 CodeGenModule &CGM = CGF.CGM; 5279 5280 // On device emit this construct as inlined code. 5281 if (CGM.getLangOpts().OpenMPIsDevice) { 5282 OMPLexicalScope Scope(CGF, S, OMPD_target); 5283 CGM.getOpenMPRuntime().emitInlinedDirective( 5284 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5285 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 5286 }); 5287 return; 5288 } 5289 5290 auto LPCRegion = 5291 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S); 5292 llvm::Function *Fn = nullptr; 5293 llvm::Constant *FnID = nullptr; 5294 5295 const Expr *IfCond = nullptr; 5296 // Check for the at most one if clause associated with the target region. 5297 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5298 if (C->getNameModifier() == OMPD_unknown || 5299 C->getNameModifier() == OMPD_target) { 5300 IfCond = C->getCondition(); 5301 break; 5302 } 5303 } 5304 5305 // Check if we have any device clause associated with the directive. 5306 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device( 5307 nullptr, OMPC_DEVICE_unknown); 5308 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 5309 Device.setPointerAndInt(C->getDevice(), C->getModifier()); 5310 5311 // Check if we have an if clause whose conditional always evaluates to false 5312 // or if we do not have any targets specified. If so the target region is not 5313 // an offload entry point. 
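  // Illustrative cases: '#pragma omp target if(0)' always executes on the
  // host, and a compile with no -fopenmp-targets triples has no devices, so
  // in either case no offload entry needs to be registered for the region.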
5314 bool IsOffloadEntry = true; 5315 if (IfCond) { 5316 bool Val; 5317 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 5318 IsOffloadEntry = false; 5319 } 5320 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5321 IsOffloadEntry = false; 5322 5323 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 5324 StringRef ParentName; 5325 // In case we have Ctors/Dtors we use the complete type variant to produce 5326 // the mangling of the device outlined kernel. 5327 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 5328 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 5329 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 5330 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 5331 else 5332 ParentName = 5333 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 5334 5335 // Emit target region as a standalone region. 5336 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 5337 IsOffloadEntry, CodeGen); 5338 OMPLexicalScope Scope(CGF, S, OMPD_task); 5339 auto &&SizeEmitter = 5340 [IsOffloadEntry](CodeGenFunction &CGF, 5341 const OMPLoopDirective &D) -> llvm::Value * { 5342 if (IsOffloadEntry) { 5343 OMPLoopScope(CGF, D); 5344 // Emit calculation of the iterations count. 5345 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 5346 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 5347 /*isSigned=*/false); 5348 return NumIterations; 5349 } 5350 return nullptr; 5351 }; 5352 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 5353 SizeEmitter); 5354 } 5355 5356 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 5357 PrePostActionTy &Action) { 5358 Action.Enter(CGF); 5359 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5360 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5361 CGF.EmitOMPPrivateClause(S, PrivateScope); 5362 (void)PrivateScope.Privatize(); 5363 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5364 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5365 5366 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 5367 } 5368 5369 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 5370 StringRef ParentName, 5371 const OMPTargetDirective &S) { 5372 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5373 emitTargetRegion(CGF, S, Action); 5374 }; 5375 llvm::Function *Fn; 5376 llvm::Constant *Addr; 5377 // Emit target region as a standalone region. 
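  // (ParentName is the mangled name of the enclosing host function; it is
  // assumed here to keep the generated region name identical between the host
  // and device compilations so the offload entry tables can be matched up.)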
5378 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5379 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5380 assert(Fn && Addr && "Target device function emission failed."); 5381 } 5382 5383 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 5384 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5385 emitTargetRegion(CGF, S, Action); 5386 }; 5387 emitCommonOMPTargetDirective(*this, S, CodeGen); 5388 } 5389 5390 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 5391 const OMPExecutableDirective &S, 5392 OpenMPDirectiveKind InnermostKind, 5393 const RegionCodeGenTy &CodeGen) { 5394 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 5395 llvm::Function *OutlinedFn = 5396 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 5397 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 5398 5399 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 5400 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 5401 if (NT || TL) { 5402 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 5403 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 5404 5405 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 5406 S.getBeginLoc()); 5407 } 5408 5409 OMPTeamsScope Scope(CGF, S); 5410 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5411 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5412 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 5413 CapturedVars); 5414 } 5415 5416 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 5417 // Emit teams region as a standalone region. 5418 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5419 Action.Enter(CGF); 5420 OMPPrivateScope PrivateScope(CGF); 5421 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5422 CGF.EmitOMPPrivateClause(S, PrivateScope); 5423 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5424 (void)PrivateScope.Privatize(); 5425 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 5426 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5427 }; 5428 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5429 emitPostUpdateForReductionClause(*this, S, 5430 [](CodeGenFunction &) { return nullptr; }); 5431 } 5432 5433 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5434 const OMPTargetTeamsDirective &S) { 5435 auto *CS = S.getCapturedStmt(OMPD_teams); 5436 Action.Enter(CGF); 5437 // Emit teams region as a standalone region. 
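  // Illustrative source form handled by this path (variable names are
  // examples only):
  //   #pragma omp target teams firstprivate(a) reduction(+: sum)
  //   { ... }
  // The lambda below privatizes 'a', initializes the reduction for 'sum', and
  // emits the captured statement inside the outlined teams region.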
5438 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5439 Action.Enter(CGF); 5440 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5441 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5442 CGF.EmitOMPPrivateClause(S, PrivateScope); 5443 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5444 (void)PrivateScope.Privatize(); 5445 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5446 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5447 CGF.EmitStmt(CS->getCapturedStmt()); 5448 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5449 }; 5450 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 5451 emitPostUpdateForReductionClause(CGF, S, 5452 [](CodeGenFunction &) { return nullptr; }); 5453 } 5454 5455 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 5456 CodeGenModule &CGM, StringRef ParentName, 5457 const OMPTargetTeamsDirective &S) { 5458 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5459 emitTargetTeamsRegion(CGF, Action, S); 5460 }; 5461 llvm::Function *Fn; 5462 llvm::Constant *Addr; 5463 // Emit target region as a standalone region. 5464 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5465 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5466 assert(Fn && Addr && "Target device function emission failed."); 5467 } 5468 5469 void CodeGenFunction::EmitOMPTargetTeamsDirective( 5470 const OMPTargetTeamsDirective &S) { 5471 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5472 emitTargetTeamsRegion(CGF, Action, S); 5473 }; 5474 emitCommonOMPTargetDirective(*this, S, CodeGen); 5475 } 5476 5477 static void 5478 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5479 const OMPTargetTeamsDistributeDirective &S) { 5480 Action.Enter(CGF); 5481 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5482 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5483 }; 5484 5485 // Emit teams region as a standalone region. 5486 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5487 PrePostActionTy &Action) { 5488 Action.Enter(CGF); 5489 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5490 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5491 (void)PrivateScope.Privatize(); 5492 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5493 CodeGenDistribute); 5494 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5495 }; 5496 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 5497 emitPostUpdateForReductionClause(CGF, S, 5498 [](CodeGenFunction &) { return nullptr; }); 5499 } 5500 5501 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 5502 CodeGenModule &CGM, StringRef ParentName, 5503 const OMPTargetTeamsDistributeDirective &S) { 5504 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5505 emitTargetTeamsDistributeRegion(CGF, Action, S); 5506 }; 5507 llvm::Function *Fn; 5508 llvm::Constant *Addr; 5509 // Emit target region as a standalone region. 
5510 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5511 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5512 assert(Fn && Addr && "Target device function emission failed."); 5513 } 5514 5515 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 5516 const OMPTargetTeamsDistributeDirective &S) { 5517 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5518 emitTargetTeamsDistributeRegion(CGF, Action, S); 5519 }; 5520 emitCommonOMPTargetDirective(*this, S, CodeGen); 5521 } 5522 5523 static void emitTargetTeamsDistributeSimdRegion( 5524 CodeGenFunction &CGF, PrePostActionTy &Action, 5525 const OMPTargetTeamsDistributeSimdDirective &S) { 5526 Action.Enter(CGF); 5527 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5528 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5529 }; 5530 5531 // Emit teams region as a standalone region. 5532 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5533 PrePostActionTy &Action) { 5534 Action.Enter(CGF); 5535 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5536 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5537 (void)PrivateScope.Privatize(); 5538 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5539 CodeGenDistribute); 5540 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5541 }; 5542 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 5543 emitPostUpdateForReductionClause(CGF, S, 5544 [](CodeGenFunction &) { return nullptr; }); 5545 } 5546 5547 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 5548 CodeGenModule &CGM, StringRef ParentName, 5549 const OMPTargetTeamsDistributeSimdDirective &S) { 5550 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5551 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 5552 }; 5553 llvm::Function *Fn; 5554 llvm::Constant *Addr; 5555 // Emit target region as a standalone region. 5556 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5557 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5558 assert(Fn && Addr && "Target device function emission failed."); 5559 } 5560 5561 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 5562 const OMPTargetTeamsDistributeSimdDirective &S) { 5563 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5564 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 5565 }; 5566 emitCommonOMPTargetDirective(*this, S, CodeGen); 5567 } 5568 5569 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 5570 const OMPTeamsDistributeDirective &S) { 5571 5572 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5573 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5574 }; 5575 5576 // Emit teams region as a standalone region. 
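  // Illustrative source form (example only):
  //   #pragma omp teams distribute
  //   for (int i = 0; i < n; ++i) ...
  // The distribute loop is generated inline inside the outlined teams region
  // through the CodeGenDistribute callback defined above.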
5577 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5578 PrePostActionTy &Action) { 5579 Action.Enter(CGF); 5580 OMPPrivateScope PrivateScope(CGF); 5581 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5582 (void)PrivateScope.Privatize(); 5583 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5584 CodeGenDistribute); 5585 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5586 }; 5587 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5588 emitPostUpdateForReductionClause(*this, S, 5589 [](CodeGenFunction &) { return nullptr; }); 5590 } 5591 5592 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 5593 const OMPTeamsDistributeSimdDirective &S) { 5594 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5595 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5596 }; 5597 5598 // Emit teams region as a standalone region. 5599 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5600 PrePostActionTy &Action) { 5601 Action.Enter(CGF); 5602 OMPPrivateScope PrivateScope(CGF); 5603 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5604 (void)PrivateScope.Privatize(); 5605 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 5606 CodeGenDistribute); 5607 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5608 }; 5609 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 5610 emitPostUpdateForReductionClause(*this, S, 5611 [](CodeGenFunction &) { return nullptr; }); 5612 } 5613 5614 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 5615 const OMPTeamsDistributeParallelForDirective &S) { 5616 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5617 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5618 S.getDistInc()); 5619 }; 5620 5621 // Emit teams region as a standalone region. 5622 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5623 PrePostActionTy &Action) { 5624 Action.Enter(CGF); 5625 OMPPrivateScope PrivateScope(CGF); 5626 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5627 (void)PrivateScope.Privatize(); 5628 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5629 CodeGenDistribute); 5630 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5631 }; 5632 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 5633 emitPostUpdateForReductionClause(*this, S, 5634 [](CodeGenFunction &) { return nullptr; }); 5635 } 5636 5637 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 5638 const OMPTeamsDistributeParallelForSimdDirective &S) { 5639 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5640 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5641 S.getDistInc()); 5642 }; 5643 5644 // Emit teams region as a standalone region. 
5645 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5646 PrePostActionTy &Action) { 5647 Action.Enter(CGF); 5648 OMPPrivateScope PrivateScope(CGF); 5649 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5650 (void)PrivateScope.Privatize(); 5651 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5652 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5653 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5654 }; 5655 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 5656 CodeGen); 5657 emitPostUpdateForReductionClause(*this, S, 5658 [](CodeGenFunction &) { return nullptr; }); 5659 } 5660 5661 static void emitTargetTeamsDistributeParallelForRegion( 5662 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 5663 PrePostActionTy &Action) { 5664 Action.Enter(CGF); 5665 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5666 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5667 S.getDistInc()); 5668 }; 5669 5670 // Emit teams region as a standalone region. 5671 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5672 PrePostActionTy &Action) { 5673 Action.Enter(CGF); 5674 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5675 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5676 (void)PrivateScope.Privatize(); 5677 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5678 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5679 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5680 }; 5681 5682 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 5683 CodeGenTeams); 5684 emitPostUpdateForReductionClause(CGF, S, 5685 [](CodeGenFunction &) { return nullptr; }); 5686 } 5687 5688 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 5689 CodeGenModule &CGM, StringRef ParentName, 5690 const OMPTargetTeamsDistributeParallelForDirective &S) { 5691 // Emit SPMD target teams distribute parallel for region as a standalone 5692 // region. 5693 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5694 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5695 }; 5696 llvm::Function *Fn; 5697 llvm::Constant *Addr; 5698 // Emit target region as a standalone region. 5699 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5700 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5701 assert(Fn && Addr && "Target device function emission failed."); 5702 } 5703 5704 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 5705 const OMPTargetTeamsDistributeParallelForDirective &S) { 5706 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5707 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5708 }; 5709 emitCommonOMPTargetDirective(*this, S, CodeGen); 5710 } 5711 5712 static void emitTargetTeamsDistributeParallelForSimdRegion( 5713 CodeGenFunction &CGF, 5714 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 5715 PrePostActionTy &Action) { 5716 Action.Enter(CGF); 5717 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5718 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5719 S.getDistInc()); 5720 }; 5721 5722 // Emit teams region as a standalone region. 
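  // For reference (illustrative source only), this corresponds to code such as:
  //   #pragma omp target teams distribute parallel for simd
  //   for (int I = 0; I < N; ++I) ...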
5723 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5724 PrePostActionTy &Action) { 5725 Action.Enter(CGF); 5726 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5727 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5728 (void)PrivateScope.Privatize(); 5729 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5730 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5731 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5732 }; 5733 5734 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 5735 CodeGenTeams); 5736 emitPostUpdateForReductionClause(CGF, S, 5737 [](CodeGenFunction &) { return nullptr; }); 5738 } 5739 5740 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 5741 CodeGenModule &CGM, StringRef ParentName, 5742 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5743 // Emit SPMD target teams distribute parallel for simd region as a standalone 5744 // region. 5745 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5746 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5747 }; 5748 llvm::Function *Fn; 5749 llvm::Constant *Addr; 5750 // Emit target region as a standalone region. 5751 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5752 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5753 assert(Fn && Addr && "Target device function emission failed."); 5754 } 5755 5756 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 5757 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5758 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5759 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5760 }; 5761 emitCommonOMPTargetDirective(*this, S, CodeGen); 5762 } 5763 5764 void CodeGenFunction::EmitOMPCancellationPointDirective( 5765 const OMPCancellationPointDirective &S) { 5766 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 5767 S.getCancelRegion()); 5768 } 5769 5770 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 5771 const Expr *IfCond = nullptr; 5772 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5773 if (C->getNameModifier() == OMPD_unknown || 5774 C->getNameModifier() == OMPD_cancel) { 5775 IfCond = C->getCondition(); 5776 break; 5777 } 5778 } 5779 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5780 // TODO: This check is necessary as we only generate `omp parallel` through 5781 // the OpenMPIRBuilder for now. 
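    // Cancellation of the other region kinds (sections, for, taskgroup) still
    // goes through the runtime call emitted below.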
5782     if (S.getCancelRegion() == OMPD_parallel) {
5783       llvm::Value *IfCondition = nullptr;
5784       if (IfCond)
5785         IfCondition = EmitScalarExpr(IfCond,
5786                                      /*IgnoreResultAssign=*/true);
5787       return Builder.restoreIP(
5788           OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
5789     }
5790   }
5791 
5792   CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
5793                                         S.getCancelRegion());
5794 }
5795 
5796 CodeGenFunction::JumpDest
5797 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
5798   if (Kind == OMPD_parallel || Kind == OMPD_task ||
5799       Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
5800       Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
5801     return ReturnBlock;
5802   assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
5803          Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
5804          Kind == OMPD_distribute_parallel_for ||
5805          Kind == OMPD_target_parallel_for ||
5806          Kind == OMPD_teams_distribute_parallel_for ||
5807          Kind == OMPD_target_teams_distribute_parallel_for);
5808   return OMPCancelStack.getExitBlock();
5809 }
5810 
5811 void CodeGenFunction::EmitOMPUseDevicePtrClause(
5812     const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
5813     const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5814   auto OrigVarIt = C.varlist_begin();
5815   auto InitIt = C.inits().begin();
5816   for (const Expr *PvtVarIt : C.private_copies()) {
5817     const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
5818     const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
5819     const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
5820 
5821     // In order to identify the right initializer we need to match the
5822     // declaration used by the mapping logic. In some cases we may get
5823     // an OMPCapturedExprDecl that refers to the original declaration.
5824     const ValueDecl *MatchingVD = OrigVD;
5825     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5826       // OMPCapturedExprDecls are used to privatize fields of the current
5827       // structure.
5828       const auto *ME = cast<MemberExpr>(OED->getInit());
5829       assert(isa<CXXThisExpr>(ME->getBase()) &&
5830              "Base should be the current struct!");
5831       MatchingVD = ME->getMemberDecl();
5832     }
5833 
5834     // If we don't have information about the current list item, move on to
5835     // the next one.
5836     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5837     if (InitAddrIt == CaptureDeviceAddrMap.end())
5838       continue;
5839 
5840     bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
5841                                                          InitAddrIt, InitVD,
5842                                                          PvtVD]() {
5843       // Initialize the temporary initialization variable with the address we
5844       // get from the runtime library. We have to cast the source address
5845       // because it is always a void *. References are materialized in the
5846       // privatization scope, so the initialization here disregards the fact
5847       // that the original variable is a reference.
5848       QualType AddrQTy =
5849           getContext().getPointerType(OrigVD->getType().getNonReferenceType());
5850       llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
5851       Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
5852       setAddrOfLocalVar(InitVD, InitAddr);
5853 
5854       // Emit the private declaration; it will be initialized through the
5855       // initialization variable we just added to the local declarations map.
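      // EmitDecl runs the initializer of the private copy; that initializer
      // refers to the initialization variable, which now resolves to the
      // device address registered above.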
5856       EmitDecl(*PvtVD);
5857 
5858       // The initialization variable has served its purpose in the emission
5859       // of the previous declaration, so we don't need it anymore.
5860       LocalDeclMap.erase(InitVD);
5861 
5862       // Return the address of the private variable.
5863       return GetAddrOfLocalVar(PvtVD);
5864     });
5865     assert(IsRegistered && "firstprivate var already registered as private");
5866     // Silence the warning about unused variable.
5867     (void)IsRegistered;
5868 
5869     ++OrigVarIt;
5870     ++InitIt;
5871   }
5872 }
5873 
5874 static const VarDecl *getBaseDecl(const Expr *Ref) {
5875   const Expr *Base = Ref->IgnoreParenImpCasts();
5876   while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
5877     Base = OASE->getBase()->IgnoreParenImpCasts();
5878   while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
5879     Base = ASE->getBase()->IgnoreParenImpCasts();
5880   return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
5881 }
5882 
5883 void CodeGenFunction::EmitOMPUseDeviceAddrClause(
5884     const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
5885     const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5886   llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
5887   for (const Expr *Ref : C.varlists()) {
5888     const VarDecl *OrigVD = getBaseDecl(Ref);
5889     if (!Processed.insert(OrigVD).second)
5890       continue;
5891     // In order to identify the right initializer we need to match the
5892     // declaration used by the mapping logic. In some cases we may get
5893     // an OMPCapturedExprDecl that refers to the original declaration.
5894     const ValueDecl *MatchingVD = OrigVD;
5895     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5896       // OMPCapturedExprDecls are used to privatize fields of the current
5897       // structure.
5898       const auto *ME = cast<MemberExpr>(OED->getInit());
5899       assert(isa<CXXThisExpr>(ME->getBase()) &&
5900              "Base should be the current struct!");
5901       MatchingVD = ME->getMemberDecl();
5902     }
5903 
5904     // If we don't have information about the current list item, move on to
5905     // the next one.
5906     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5907     if (InitAddrIt == CaptureDeviceAddrMap.end())
5908       continue;
5909 
5910     Address PrivAddr = InitAddrIt->getSecond();
5911     // For declrefs and variable length arrays we need to load the pointer for
5912     // correct mapping, since the pointer to the data was passed to the runtime.
5913     if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
5914         MatchingVD->getType()->isArrayType())
5915       PrivAddr =
5916           EmitLoadOfPointer(PrivAddr, getContext()
5917                                           .getPointerType(OrigVD->getType())
5918                                           ->castAs<PointerType>());
5919     llvm::Type *RealTy =
5920         ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
5921             ->getPointerTo();
5922     PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
5923 
5924     (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
5925   }
5926 }
5927 
5928 // Generate the instructions for '#pragma omp target data' directive.
5929 void CodeGenFunction::EmitOMPTargetDataDirective(
5930     const OMPTargetDataDirective &S) {
5931   CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
5932 
5933   // Create a pre/post action to signal the privatization of the device pointer.
5934   // This action can be replaced by the OpenMP runtime code generation to
5935   // deactivate privatization.
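  // For reference (illustrative source only), this handles code such as:
  //   #pragma omp target data map(tofrom: A) use_device_ptr(Ptr)
  //   { ... }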
5936   bool PrivatizeDevicePointers = false;
5937   class DevicePointerPrivActionTy : public PrePostActionTy {
5938     bool &PrivatizeDevicePointers;
5939 
5940   public:
5941     explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
5942         : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
5943     void Enter(CodeGenFunction &CGF) override {
5944       PrivatizeDevicePointers = true;
5945     }
5946   };
5947   DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
5948 
5949   auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
5950                        CodeGenFunction &CGF, PrePostActionTy &Action) {
5951     auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5952       CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5953     };
5954 
5955     // Codegen that selects whether to generate the privatization code or not.
5956     auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
5957                           &InnermostCodeGen](CodeGenFunction &CGF,
5958                                              PrePostActionTy &Action) {
5959       RegionCodeGenTy RCG(InnermostCodeGen);
5960       PrivatizeDevicePointers = false;
5961 
5962       // Call the pre-action to change the status of PrivatizeDevicePointers if
5963       // needed.
5964       Action.Enter(CGF);
5965 
5966       if (PrivatizeDevicePointers) {
5967         OMPPrivateScope PrivateScope(CGF);
5968         // Emit all instances of the use_device_ptr and use_device_addr clauses.
5969         for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
5970           CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
5971                                         Info.CaptureDeviceAddrMap);
5972         for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
5973           CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
5974                                          Info.CaptureDeviceAddrMap);
5975         (void)PrivateScope.Privatize();
5976         RCG(CGF);
5977       } else {
5978         RCG(CGF);
5979       }
5980     };
5981 
5982     // Forward the provided action to the privatization codegen.
5983     RegionCodeGenTy PrivRCG(PrivCodeGen);
5984     PrivRCG.setAction(Action);
5985 
5986     // Although the body of the region is emitted as an inlined directive, we
5987     // don't use an inline scope: changes to the references inside the region
5988     // are expected to be visible outside, so we do not privatize them.
5989     OMPLexicalScope Scope(CGF, S);
5990     CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
5991                                                     PrivRCG);
5992   };
5993 
5994   RegionCodeGenTy RCG(CodeGen);
5995 
5996   // If we don't have target devices, don't bother emitting the data mapping
5997   // code.
5998   if (CGM.getLangOpts().OMPTargetTriples.empty()) {
5999     RCG(*this);
6000     return;
6001   }
6002 
6003   // Check if we have any if clause associated with the directive.
6004   const Expr *IfCond = nullptr;
6005   if (const auto *C = S.getSingleClause<OMPIfClause>())
6006     IfCond = C->getCondition();
6007 
6008   // Check if we have any device clause associated with the directive.
6009   const Expr *Device = nullptr;
6010   if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6011     Device = C->getDevice();
6012 
6013   // Set the action to signal privatization of device pointers.
6014   RCG.setAction(PrivAction);
6015 
6016   // Emit region code.
6017   CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
6018                                              Info);
6019 }
6020 
6021 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
6022     const OMPTargetEnterDataDirective &S) {
6023   // If we don't have target devices, don't bother emitting the data mapping
6024   // code.
6025   if (CGM.getLangOpts().OMPTargetTriples.empty())
6026     return;
6027 
6028   // Check if we have any if clause associated with the directive.
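  // An 'if' clause makes the data-mapping runtime call conditional at run time.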
6029 const Expr *IfCond = nullptr; 6030 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6031 IfCond = C->getCondition(); 6032 6033 // Check if we have any device clause associated with the directive. 6034 const Expr *Device = nullptr; 6035 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6036 Device = C->getDevice(); 6037 6038 OMPLexicalScope Scope(*this, S, OMPD_task); 6039 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6040 } 6041 6042 void CodeGenFunction::EmitOMPTargetExitDataDirective( 6043 const OMPTargetExitDataDirective &S) { 6044 // If we don't have target devices, don't bother emitting the data mapping 6045 // code. 6046 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6047 return; 6048 6049 // Check if we have any if clause associated with the directive. 6050 const Expr *IfCond = nullptr; 6051 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6052 IfCond = C->getCondition(); 6053 6054 // Check if we have any device clause associated with the directive. 6055 const Expr *Device = nullptr; 6056 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6057 Device = C->getDevice(); 6058 6059 OMPLexicalScope Scope(*this, S, OMPD_task); 6060 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6061 } 6062 6063 static void emitTargetParallelRegion(CodeGenFunction &CGF, 6064 const OMPTargetParallelDirective &S, 6065 PrePostActionTy &Action) { 6066 // Get the captured statement associated with the 'parallel' region. 6067 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6068 Action.Enter(CGF); 6069 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6070 Action.Enter(CGF); 6071 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6072 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6073 CGF.EmitOMPPrivateClause(S, PrivateScope); 6074 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6075 (void)PrivateScope.Privatize(); 6076 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6077 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6078 // TODO: Add support for clauses. 6079 CGF.EmitStmt(CS->getCapturedStmt()); 6080 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6081 }; 6082 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6083 emitEmptyBoundParameters); 6084 emitPostUpdateForReductionClause(CGF, S, 6085 [](CodeGenFunction &) { return nullptr; }); 6086 } 6087 6088 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6089 CodeGenModule &CGM, StringRef ParentName, 6090 const OMPTargetParallelDirective &S) { 6091 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6092 emitTargetParallelRegion(CGF, S, Action); 6093 }; 6094 llvm::Function *Fn; 6095 llvm::Constant *Addr; 6096 // Emit target region as a standalone region. 
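  // Passing IsOffloadEntry=true registers the outlined function in the offload
  // entry table so the offloading runtime can associate it with this target
  // region.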
6097 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6098 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6099 assert(Fn && Addr && "Target device function emission failed."); 6100 } 6101 6102 void CodeGenFunction::EmitOMPTargetParallelDirective( 6103 const OMPTargetParallelDirective &S) { 6104 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6105 emitTargetParallelRegion(CGF, S, Action); 6106 }; 6107 emitCommonOMPTargetDirective(*this, S, CodeGen); 6108 } 6109 6110 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 6111 const OMPTargetParallelForDirective &S, 6112 PrePostActionTy &Action) { 6113 Action.Enter(CGF); 6114 // Emit directive as a combined directive that consists of two implicit 6115 // directives: 'parallel' with 'for' directive. 6116 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6117 Action.Enter(CGF); 6118 CodeGenFunction::OMPCancelStackRAII CancelRegion( 6119 CGF, OMPD_target_parallel_for, S.hasCancel()); 6120 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6121 emitDispatchForLoopBounds); 6122 }; 6123 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 6124 emitEmptyBoundParameters); 6125 } 6126 6127 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 6128 CodeGenModule &CGM, StringRef ParentName, 6129 const OMPTargetParallelForDirective &S) { 6130 // Emit SPMD target parallel for region as a standalone region. 6131 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6132 emitTargetParallelForRegion(CGF, S, Action); 6133 }; 6134 llvm::Function *Fn; 6135 llvm::Constant *Addr; 6136 // Emit target region as a standalone region. 6137 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6138 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6139 assert(Fn && Addr && "Target device function emission failed."); 6140 } 6141 6142 void CodeGenFunction::EmitOMPTargetParallelForDirective( 6143 const OMPTargetParallelForDirective &S) { 6144 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6145 emitTargetParallelForRegion(CGF, S, Action); 6146 }; 6147 emitCommonOMPTargetDirective(*this, S, CodeGen); 6148 } 6149 6150 static void 6151 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 6152 const OMPTargetParallelForSimdDirective &S, 6153 PrePostActionTy &Action) { 6154 Action.Enter(CGF); 6155 // Emit directive as a combined directive that consists of two implicit 6156 // directives: 'parallel' with 'for' directive. 6157 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6158 Action.Enter(CGF); 6159 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6160 emitDispatchForLoopBounds); 6161 }; 6162 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen, 6163 emitEmptyBoundParameters); 6164 } 6165 6166 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 6167 CodeGenModule &CGM, StringRef ParentName, 6168 const OMPTargetParallelForSimdDirective &S) { 6169 // Emit SPMD target parallel for region as a standalone region. 6170 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6171 emitTargetParallelForSimdRegion(CGF, S, Action); 6172 }; 6173 llvm::Function *Fn; 6174 llvm::Constant *Addr; 6175 // Emit target region as a standalone region. 
6176 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6177 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6178 assert(Fn && Addr && "Target device function emission failed."); 6179 } 6180 6181 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective( 6182 const OMPTargetParallelForSimdDirective &S) { 6183 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6184 emitTargetParallelForSimdRegion(CGF, S, Action); 6185 }; 6186 emitCommonOMPTargetDirective(*this, S, CodeGen); 6187 } 6188 6189 /// Emit a helper variable and return corresponding lvalue. 6190 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, 6191 const ImplicitParamDecl *PVD, 6192 CodeGenFunction::OMPPrivateScope &Privates) { 6193 const auto *VDecl = cast<VarDecl>(Helper->getDecl()); 6194 Privates.addPrivate(VDecl, 6195 [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); }); 6196 } 6197 6198 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) { 6199 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind())); 6200 // Emit outlined function for task construct. 6201 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop); 6202 Address CapturedStruct = Address::invalid(); 6203 { 6204 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6205 CapturedStruct = GenerateCapturedStmtArgument(*CS); 6206 } 6207 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 6208 const Expr *IfCond = nullptr; 6209 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6210 if (C->getNameModifier() == OMPD_unknown || 6211 C->getNameModifier() == OMPD_taskloop) { 6212 IfCond = C->getCondition(); 6213 break; 6214 } 6215 } 6216 6217 OMPTaskDataTy Data; 6218 // Check if taskloop must be emitted without taskgroup. 6219 Data.Nogroup = S.getSingleClause<OMPNogroupClause>(); 6220 // TODO: Check if we should emit tied or untied task. 6221 Data.Tied = true; 6222 // Set scheduling for taskloop 6223 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) { 6224 // grainsize clause 6225 Data.Schedule.setInt(/*IntVal=*/false); 6226 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 6227 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) { 6228 // num_tasks clause 6229 Data.Schedule.setInt(/*IntVal=*/true); 6230 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 6231 } 6232 6233 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 6234 // if (PreCond) { 6235 // for (IV in 0..LastIteration) BODY; 6236 // <Final counter/linear vars updates>; 6237 // } 6238 // 6239 6240 // Emit: if (PreCond) - begin. 6241 // If the condition constant folds and can be elided, avoid emitting the 6242 // whole loop. 6243 bool CondConstant; 6244 llvm::BasicBlock *ContBlock = nullptr; 6245 OMPLoopScope PreInitScope(CGF, S); 6246 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 6247 if (!CondConstant) 6248 return; 6249 } else { 6250 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 6251 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 6252 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 6253 CGF.getProfileCount(&S)); 6254 CGF.EmitBlock(ThenBlock); 6255 CGF.incrementProfileCounter(&S); 6256 } 6257 6258 (void)CGF.EmitOMPLinearClauseInit(S); 6259 6260 OMPPrivateScope LoopScope(CGF); 6261 // Emit helper vars inits. 
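    // The taskloop outlined function receives the lower bound, upper bound,
    // stride and last-iteration flag as trailing parameters of the captured
    // declaration; map the corresponding helper variables onto them below.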
6262 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6263 auto *I = CS->getCapturedDecl()->param_begin(); 6264 auto *LBP = std::next(I, LowerBound); 6265 auto *UBP = std::next(I, UpperBound); 6266 auto *STP = std::next(I, Stride); 6267 auto *LIP = std::next(I, LastIter); 6268 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6269 LoopScope); 6270 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6271 LoopScope); 6272 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6273 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6274 LoopScope); 6275 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6276 CGF.EmitOMPLinearClause(S, LoopScope); 6277 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6278 (void)LoopScope.Privatize(); 6279 // Emit the loop iteration variable. 6280 const Expr *IVExpr = S.getIterationVariable(); 6281 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6282 CGF.EmitVarDecl(*IVDecl); 6283 CGF.EmitIgnoredExpr(S.getInit()); 6284 6285 // Emit the iterations count variable. 6286 // If it is not a variable, Sema decided to calculate iterations count on 6287 // each iteration (e.g., it is foldable into a constant). 6288 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6289 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6290 // Emit calculation of the iterations count. 6291 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6292 } 6293 6294 { 6295 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6296 emitCommonSimdLoop( 6297 CGF, S, 6298 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6299 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6300 CGF.EmitOMPSimdInit(S); 6301 }, 6302 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6303 CGF.EmitOMPInnerLoop( 6304 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6305 [&S](CodeGenFunction &CGF) { 6306 emitOMPLoopBodyWithStopPoint(CGF, S, 6307 CodeGenFunction::JumpDest()); 6308 }, 6309 [](CodeGenFunction &) {}); 6310 }); 6311 } 6312 // Emit: if (PreCond) - end. 6313 if (ContBlock) { 6314 CGF.EmitBranch(ContBlock); 6315 CGF.EmitBlock(ContBlock, true); 6316 } 6317 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
6318 if (HasLastprivateClause) { 6319 CGF.EmitOMPLastprivateClauseFinal( 6320 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6321 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6322 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6323 (*LIP)->getType(), S.getBeginLoc()))); 6324 } 6325 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6326 return CGF.Builder.CreateIsNotNull( 6327 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6328 (*LIP)->getType(), S.getBeginLoc())); 6329 }); 6330 }; 6331 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6332 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6333 const OMPTaskDataTy &Data) { 6334 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6335 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6336 OMPLoopScope PreInitScope(CGF, S); 6337 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6338 OutlinedFn, SharedsTy, 6339 CapturedStruct, IfCond, Data); 6340 }; 6341 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6342 CodeGen); 6343 }; 6344 if (Data.Nogroup) { 6345 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6346 } else { 6347 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6348 *this, 6349 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6350 PrePostActionTy &Action) { 6351 Action.Enter(CGF); 6352 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6353 Data); 6354 }, 6355 S.getBeginLoc()); 6356 } 6357 } 6358 6359 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6360 auto LPCRegion = 6361 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6362 EmitOMPTaskLoopBasedDirective(S); 6363 } 6364 6365 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6366 const OMPTaskLoopSimdDirective &S) { 6367 auto LPCRegion = 6368 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6369 OMPLexicalScope Scope(*this, S); 6370 EmitOMPTaskLoopBasedDirective(S); 6371 } 6372 6373 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6374 const OMPMasterTaskLoopDirective &S) { 6375 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6376 Action.Enter(CGF); 6377 EmitOMPTaskLoopBasedDirective(S); 6378 }; 6379 auto LPCRegion = 6380 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6381 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 6382 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6383 } 6384 6385 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 6386 const OMPMasterTaskLoopSimdDirective &S) { 6387 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6388 Action.Enter(CGF); 6389 EmitOMPTaskLoopBasedDirective(S); 6390 }; 6391 auto LPCRegion = 6392 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6393 OMPLexicalScope Scope(*this, S); 6394 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6395 } 6396 6397 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 6398 const OMPParallelMasterTaskLoopDirective &S) { 6399 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6400 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6401 PrePostActionTy &Action) { 6402 Action.Enter(CGF); 6403 CGF.EmitOMPTaskLoopBasedDirective(S); 6404 }; 6405 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6406 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6407 S.getBeginLoc()); 
6408 }; 6409 auto LPCRegion = 6410 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6411 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 6412 emitEmptyBoundParameters); 6413 } 6414 6415 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 6416 const OMPParallelMasterTaskLoopSimdDirective &S) { 6417 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6418 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6419 PrePostActionTy &Action) { 6420 Action.Enter(CGF); 6421 CGF.EmitOMPTaskLoopBasedDirective(S); 6422 }; 6423 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6424 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6425 S.getBeginLoc()); 6426 }; 6427 auto LPCRegion = 6428 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6429 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 6430 emitEmptyBoundParameters); 6431 } 6432 6433 // Generate the instructions for '#pragma omp target update' directive. 6434 void CodeGenFunction::EmitOMPTargetUpdateDirective( 6435 const OMPTargetUpdateDirective &S) { 6436 // If we don't have target devices, don't bother emitting the data mapping 6437 // code. 6438 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6439 return; 6440 6441 // Check if we have any if clause associated with the directive. 6442 const Expr *IfCond = nullptr; 6443 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6444 IfCond = C->getCondition(); 6445 6446 // Check if we have any device clause associated with the directive. 6447 const Expr *Device = nullptr; 6448 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6449 Device = C->getDevice(); 6450 6451 OMPLexicalScope Scope(*this, S, OMPD_task); 6452 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6453 } 6454 6455 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 6456 const OMPExecutableDirective &D) { 6457 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 6458 EmitOMPScanDirective(*SD); 6459 return; 6460 } 6461 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 6462 return; 6463 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 6464 OMPPrivateScope GlobalsScope(CGF); 6465 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 6466 // Capture global firstprivates to avoid crash. 
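      // Globals named in firstprivate clauses may have no entry in LocalDeclMap;
      // record their addresses in GlobalsScope so later lookups succeed.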
6467 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) { 6468 for (const Expr *Ref : C->varlists()) { 6469 const auto *DRE = cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 6470 if (!DRE) 6471 continue; 6472 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()); 6473 if (!VD || VD->hasLocalStorage()) 6474 continue; 6475 if (!CGF.LocalDeclMap.count(VD)) { 6476 LValue GlobLVal = CGF.EmitLValue(Ref); 6477 GlobalsScope.addPrivate( 6478 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 6479 } 6480 } 6481 } 6482 } 6483 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 6484 (void)GlobalsScope.Privatize(); 6485 ParentLoopDirectiveForScanRegion ScanRegion(CGF, D); 6486 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action); 6487 } else { 6488 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) { 6489 for (const Expr *E : LD->counters()) { 6490 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 6491 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) { 6492 LValue GlobLVal = CGF.EmitLValue(E); 6493 GlobalsScope.addPrivate( 6494 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 6495 } 6496 if (isa<OMPCapturedExprDecl>(VD)) { 6497 // Emit only those that were not explicitly referenced in clauses. 6498 if (!CGF.LocalDeclMap.count(VD)) 6499 CGF.EmitVarDecl(*VD); 6500 } 6501 } 6502 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) { 6503 if (!C->getNumForLoops()) 6504 continue; 6505 for (unsigned I = LD->getCollapsedNumber(), 6506 E = C->getLoopNumIterations().size(); 6507 I < E; ++I) { 6508 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>( 6509 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) { 6510 // Emit only those that were not explicitly referenced in clauses. 6511 if (!CGF.LocalDeclMap.count(VD)) 6512 CGF.EmitVarDecl(*VD); 6513 } 6514 } 6515 } 6516 } 6517 (void)GlobalsScope.Privatize(); 6518 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt()); 6519 } 6520 }; 6521 { 6522 auto LPCRegion = 6523 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D); 6524 OMPSimdLexicalScope Scope(*this, D); 6525 CGM.getOpenMPRuntime().emitInlinedDirective( 6526 *this, 6527 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd 6528 : D.getDirectiveKind(), 6529 CodeGen); 6530 } 6531 // Check for outer lastprivate conditional update. 6532 checkForLastprivateConditionalUpdate(*this, D); 6533 } 6534