//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs, which handles correct
/// codegen for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
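    // Privatize() below applies the mappings registered above, so subsequent
    // emission of the captured expressions resolves to the remapped addresses.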
    (void)InlinedShareds.Privatize();
  }
};

/// Lexical scope for the OpenMP parallel construct, which handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct, which handles correct
/// codegen for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives, which supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(
                          CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit init, __range and __end variables for C++ range loops.
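    // E.g. for 'for (auto &X : Arr)' the implicit '__range' and '__end'
    // variables (and any init-statement) must be emitted here so that the
    // loop bound expressions can refer to them.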
    const Stmt *Body =
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
          Body, /*TryImperfectlyNestedLoops=*/true);
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
        if (const Stmt *Init = CXXFor->getInit())
          CGF.EmitStmt(Init);
        CGF.EmitStmt(CXXFor->getRangeStmt());
        CGF.EmitStmt(CXXFor->getEndStmt());
        Body = CXXFor->getBody();
      }
    }
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
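  // The resulting parameter list is, roughly: the captured decl's leading
  // parameters, one parameter per captured field (cast to uintptr when
  // FO.UIntPtrCastRequired), and then the remaining trailing parameters.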
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. The VLA type sizes are passed to the outlined
    // function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
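  // Sketch of the debug-info scheme: when a wrapper is needed, a
  // "<helper>_debug__" variant with the original parameter types is emitted
  // first, and the "<helper>" wrapper created below simply forwards its
  // (uintptr-cast) arguments to it.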
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function, e.g. omp for, omp simd, omp distribute, etc.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit a copy for firstprivate constant variables in target
      // regions that are captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in the current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy the data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                               OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
                                                                        OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit an implicit barrier if at least one lastprivate conditional is
    // found and this is not simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
            return IsArray
                       ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
          });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // When the first post-update expression is found, emit the
          // conditional block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
This is necessary for combined constructs such as
1457 /// 'distribute parallel for'
1458 typedef llvm::function_ref<void(CodeGenFunction &,
1459 const OMPExecutableDirective &,
1460 llvm::SmallVectorImpl<llvm::Value *> &)>
1461 CodeGenBoundParametersTy;
1462 } // anonymous namespace
1463
1464 static void
1465 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
1466 const OMPExecutableDirective &S) {
1467 if (CGF.getLangOpts().OpenMP < 50)
1468 return;
1469 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
1470 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1471 for (const Expr *Ref : C->varlists()) {
1472 if (!Ref->getType()->isScalarType())
1473 continue;
1474 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1475 if (!DRE)
1476 continue;
1477 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1478 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1479 }
1480 }
1481 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1482 for (const Expr *Ref : C->varlists()) {
1483 if (!Ref->getType()->isScalarType())
1484 continue;
1485 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1486 if (!DRE)
1487 continue;
1488 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1489 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1490 }
1491 }
1492 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1493 for (const Expr *Ref : C->varlists()) {
1494 if (!Ref->getType()->isScalarType())
1495 continue;
1496 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1497 if (!DRE)
1498 continue;
1499 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1500 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1501 }
1502 }
1503 // Privates should not be analyzed since they are not captured at all.
1504 // Task reductions may be skipped - tasks are ignored.
1505 // Firstprivates do not return a value but may be passed by reference - no need
1506 // to check for updated lastprivate conditional.
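// For example (illustrative only), with
//   #pragma omp parallel for lastprivate(conditional: x)
// assignments to 'x' in the region must be tracked so that the value written
// by the sequentially last iteration that assigns 'x' is the one copied out.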
1507 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1508 for (const Expr *Ref : C->varlists()) { 1509 if (!Ref->getType()->isScalarType()) 1510 continue; 1511 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1512 if (!DRE) 1513 continue; 1514 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1515 } 1516 } 1517 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1518 CGF, S, PrivateDecls); 1519 } 1520 1521 static void emitCommonOMPParallelDirective( 1522 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1523 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1524 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1525 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1526 llvm::Function *OutlinedFn = 1527 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1528 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1529 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1530 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1531 llvm::Value *NumThreads = 1532 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1533 /*IgnoreResultAssign=*/true); 1534 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1535 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1536 } 1537 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1538 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1539 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1540 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1541 } 1542 const Expr *IfCond = nullptr; 1543 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1544 if (C->getNameModifier() == OMPD_unknown || 1545 C->getNameModifier() == OMPD_parallel) { 1546 IfCond = C->getCondition(); 1547 break; 1548 } 1549 } 1550 1551 OMPParallelScope Scope(CGF, S); 1552 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1553 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk 1554 // lower and upper bounds with the pragma 'for' chunking mechanism. 1555 // The following lambda takes care of appending the lower and upper bound 1556 // parameters when necessary 1557 CodeGenBoundParameters(CGF, S, CapturedVars); 1558 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1559 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1560 CapturedVars, IfCond); 1561 } 1562 1563 static void emitEmptyBoundParameters(CodeGenFunction &, 1564 const OMPExecutableDirective &, 1565 llvm::SmallVectorImpl<llvm::Value *> &) {} 1566 1567 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 1568 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 1569 // Check if we have any if clause associated with the directive. 
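// For example (illustrative only), '#pragma omp parallel if(n > 100)' yields a
// condition value here that is handed to the OpenMPIRBuilder so the runtime
// can choose between a parallel and a serialized region.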
1570 llvm::Value *IfCond = nullptr;
1571 if (const auto *C = S.getSingleClause<OMPIfClause>())
1572 IfCond = EmitScalarExpr(C->getCondition(),
1573 /*IgnoreResultAssign=*/true);
1574
1575 llvm::Value *NumThreads = nullptr;
1576 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1577 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1578 /*IgnoreResultAssign=*/true);
1579
1580 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1581 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1582 ProcBind = ProcBindClause->getProcBindKind();
1583
1584 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1585
1586 // The cleanup callback that finalizes all variables at the given location,
1587 // thus calls destructors etc.
1588 auto FiniCB = [this](InsertPointTy IP) {
1589 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1590 };
1591
1592 // Privatization callback that performs appropriate action for
1593 // shared/private/firstprivate/lastprivate/copyin/... variables.
1594 //
1595 // TODO: This defaults to shared right now.
1596 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1597 llvm::Value &Val, llvm::Value *&ReplVal) {
1598 // The next line is appropriate only for variables (Val) with the
1599 // data-sharing attribute "shared".
1600 ReplVal = &Val;
1601
1602 return CodeGenIP;
1603 };
1604
1605 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1606 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1607
1608 auto BodyGenCB = [ParallelRegionBodyStmt,
1609 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1610 llvm::BasicBlock &ContinuationBB) {
1611 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
1612 ContinuationBB);
1613 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
1614 CodeGenIP, ContinuationBB);
1615 };
1616
1617 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1618 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1619 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB,
1620 FiniCB, IfCond, NumThreads,
1621 ProcBind, S.hasCancel()));
1622 return;
1623 }
1624
1625 // Emit parallel region as a standalone region.
1626 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1627 Action.Enter(CGF);
1628 OMPPrivateScope PrivateScope(CGF);
1629 bool Copyins = CGF.EmitOMPCopyinClause(S);
1630 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1631 if (Copyins) {
1632 // Emit an implicit barrier to synchronize threads and avoid data races on
1633 // propagation of the master thread's values of threadprivate variables to
1634 // local instances of those variables in all other implicit threads.
1635 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1636 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1637 /*ForceSimpleCall=*/true);
1638 }
1639 CGF.EmitOMPPrivateClause(S, PrivateScope);
1640 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1641 (void)PrivateScope.Privatize();
1642 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1643 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1644 };
1645 {
1646 auto LPCRegion =
1647 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1648 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1649 emitEmptyBoundParameters);
1650 emitPostUpdateForReductionClause(*this, S,
1651 [](CodeGenFunction &) { return nullptr; });
1652 }
1653 // Check for outer lastprivate conditional update.
1654 checkForLastprivateConditionalUpdate(*this, S); 1655 } 1656 1657 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, 1658 int MaxLevel, int Level = 0) { 1659 assert(Level < MaxLevel && "Too deep lookup during loop body codegen."); 1660 const Stmt *SimplifiedS = S->IgnoreContainers(); 1661 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) { 1662 PrettyStackTraceLoc CrashInfo( 1663 CGF.getContext().getSourceManager(), CS->getLBracLoc(), 1664 "LLVM IR generation of compound statement ('{}')"); 1665 1666 // Keep track of the current cleanup stack depth, including debug scopes. 1667 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1668 for (const Stmt *CurStmt : CS->body()) 1669 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1670 return; 1671 } 1672 if (SimplifiedS == NextLoop) { 1673 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1674 S = For->getBody(); 1675 } else { 1676 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1677 "Expected canonical for loop or range-based for loop."); 1678 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1679 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1680 S = CXXFor->getBody(); 1681 } 1682 if (Level + 1 < MaxLevel) { 1683 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1684 S, /*TryImperfectlyNestedLoops=*/true); 1685 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1686 return; 1687 } 1688 } 1689 CGF.EmitStmt(S); 1690 } 1691 1692 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1693 JumpDest LoopExit) { 1694 RunCleanupsScope BodyScope(*this); 1695 // Update counters values on current iteration. 1696 for (const Expr *UE : D.updates()) 1697 EmitIgnoredExpr(UE); 1698 // Update the linear variables. 1699 // In distribute directives only loop counters may be marked as linear, no 1700 // need to generate the code for them. 1701 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1702 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1703 for (const Expr *UE : C->updates()) 1704 EmitIgnoredExpr(UE); 1705 } 1706 } 1707 1708 // On a continue in the body, jump to the end. 1709 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1710 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1711 for (const Expr *E : D.finals_conditions()) { 1712 if (!E) 1713 continue; 1714 // Check that loop counter in non-rectangular nest fits into the iteration 1715 // space. 1716 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1717 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1718 getProfileCount(D.getBody())); 1719 EmitBlock(NextBB); 1720 } 1721 1722 OMPPrivateScope InscanScope(*this); 1723 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true); 1724 bool IsInscanRegion = InscanScope.Privatize(); 1725 if (IsInscanRegion) { 1726 // Need to remember the block before and after scan directive 1727 // to dispatch them correctly depending on the clause used in 1728 // this directive, inclusive or exclusive. For inclusive scan the natural 1729 // order of the blocks is used, for exclusive clause the blocks must be 1730 // executed in reverse order. 1731 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb"); 1732 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb"); 1733 // No need to allocate inscan exit block, in simd mode it is selected in the 1734 // codegen for the scan directive. 
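// For example (illustrative only), with '#pragma omp scan exclusive(x)' the
// after-scan block is dispatched before the before-scan block, while
// 'inclusive(x)' keeps the natural block order described above.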
1735 if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd) 1736 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1737 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1738 EmitBranch(OMPScanDispatch); 1739 EmitBlock(OMPBeforeScanBlock); 1740 } 1741 1742 // Emit loop variables for C++ range loops. 1743 const Stmt *Body = 1744 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1745 // Emit loop body. 1746 emitBody(*this, Body, 1747 OMPLoopDirective::tryToFindNextInnerLoop( 1748 Body, /*TryImperfectlyNestedLoops=*/true), 1749 D.getCollapsedNumber()); 1750 1751 // Jump to the dispatcher at the end of the loop body. 1752 if (IsInscanRegion) 1753 EmitBranch(OMPScanExitBlock); 1754 1755 // The end (updates/cleanups). 1756 EmitBlock(Continue.getBlock()); 1757 BreakContinueStack.pop_back(); 1758 } 1759 1760 void CodeGenFunction::EmitOMPInnerLoop( 1761 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 1762 const Expr *IncExpr, 1763 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 1764 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 1765 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 1766 1767 // Start the loop with a block that tests the condition. 1768 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1769 EmitBlock(CondBlock); 1770 const SourceRange R = S.getSourceRange(); 1771 1772 // If attributes are attached, push to the basic block with them. 1773 const auto &OMPED = cast<OMPExecutableDirective>(S); 1774 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 1775 const Stmt *SS = ICS->getCapturedStmt(); 1776 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 1777 if (AS) 1778 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 1779 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 1780 SourceLocToDebugLoc(R.getEnd())); 1781 else 1782 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1783 SourceLocToDebugLoc(R.getEnd())); 1784 1785 // If there are any cleanups between here and the loop-exit scope, 1786 // create a block to stage a loop exit along. 1787 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1788 if (RequiresCleanup) 1789 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1790 1791 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1792 1793 // Emit condition. 1794 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1795 if (ExitBlock != LoopExit.getBlock()) { 1796 EmitBlock(ExitBlock); 1797 EmitBranchThroughCleanup(LoopExit); 1798 } 1799 1800 EmitBlock(LoopBody); 1801 incrementProfileCounter(&S); 1802 1803 // Create a block for the increment. 1804 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1805 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1806 1807 BodyGen(*this); 1808 1809 // Emit "IV = IV + 1" and a back-edge to the condition block. 1810 EmitBlock(Continue.getBlock()); 1811 EmitIgnoredExpr(IncExpr); 1812 PostIncGen(*this); 1813 BreakContinueStack.pop_back(); 1814 EmitBranch(CondBlock); 1815 LoopStack.pop(); 1816 // Emit the fall-through block. 1817 EmitBlock(LoopExit.getBlock()); 1818 } 1819 1820 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1821 if (!HaveInsertPoint()) 1822 return false; 1823 // Emit inits for the linear variables. 
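// For example (illustrative only), '#pragma omp simd linear(j : 2)' produces
// an init for the privatized 'j' here and, when the step expression is not a
// constant, a helper variable holding the pre-computed step.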
1824 bool HasLinears = false; 1825 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1826 for (const Expr *Init : C->inits()) { 1827 HasLinears = true; 1828 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1829 if (const auto *Ref = 1830 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1831 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1832 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1833 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1834 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1835 VD->getInit()->getType(), VK_LValue, 1836 VD->getInit()->getExprLoc()); 1837 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1838 VD->getType()), 1839 /*capturedByInit=*/false); 1840 EmitAutoVarCleanups(Emission); 1841 } else { 1842 EmitVarDecl(*VD); 1843 } 1844 } 1845 // Emit the linear steps for the linear clauses. 1846 // If a step is not constant, it is pre-calculated before the loop. 1847 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1848 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1849 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1850 // Emit calculation of the linear step. 1851 EmitIgnoredExpr(CS); 1852 } 1853 } 1854 return HasLinears; 1855 } 1856 1857 void CodeGenFunction::EmitOMPLinearClauseFinal( 1858 const OMPLoopDirective &D, 1859 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1860 if (!HaveInsertPoint()) 1861 return; 1862 llvm::BasicBlock *DoneBB = nullptr; 1863 // Emit the final values of the linear variables. 1864 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1865 auto IC = C->varlist_begin(); 1866 for (const Expr *F : C->finals()) { 1867 if (!DoneBB) { 1868 if (llvm::Value *Cond = CondGen(*this)) { 1869 // If the first post-update expression is found, emit conditional 1870 // block if it was requested. 
1871 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1872 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1873 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1874 EmitBlock(ThenBB); 1875 } 1876 } 1877 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1878 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1879 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1880 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1881 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1882 CodeGenFunction::OMPPrivateScope VarScope(*this); 1883 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1884 (void)VarScope.Privatize(); 1885 EmitIgnoredExpr(F); 1886 ++IC; 1887 } 1888 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1889 EmitIgnoredExpr(PostUpdate); 1890 } 1891 if (DoneBB) 1892 EmitBlock(DoneBB, /*IsFinished=*/true); 1893 } 1894 1895 static void emitAlignedClause(CodeGenFunction &CGF, 1896 const OMPExecutableDirective &D) { 1897 if (!CGF.HaveInsertPoint()) 1898 return; 1899 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1900 llvm::APInt ClauseAlignment(64, 0); 1901 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1902 auto *AlignmentCI = 1903 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1904 ClauseAlignment = AlignmentCI->getValue(); 1905 } 1906 for (const Expr *E : Clause->varlists()) { 1907 llvm::APInt Alignment(ClauseAlignment); 1908 if (Alignment == 0) { 1909 // OpenMP [2.8.1, Description] 1910 // If no optional parameter is specified, implementation-defined default 1911 // alignments for SIMD instructions on the target platforms are assumed. 1912 Alignment = 1913 CGF.getContext() 1914 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1915 E->getType()->getPointeeType())) 1916 .getQuantity(); 1917 } 1918 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1919 "alignment is not power of 2"); 1920 if (Alignment != 0) { 1921 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1922 CGF.emitAlignmentAssumption( 1923 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1924 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1925 } 1926 } 1927 } 1928 } 1929 1930 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1931 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1932 if (!HaveInsertPoint()) 1933 return; 1934 auto I = S.private_counters().begin(); 1935 for (const Expr *E : S.counters()) { 1936 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1937 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1938 // Emit var without initialization. 1939 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1940 EmitAutoVarCleanups(VarEmission); 1941 LocalDeclMap.erase(PrivateVD); 1942 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1943 return VarEmission.getAllocatedAddress(); 1944 }); 1945 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1946 VD->hasGlobalStorage()) { 1947 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1948 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1949 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1950 E->getType(), VK_LValue, E->getExprLoc()); 1951 return EmitLValue(&DRE).getAddress(*this); 1952 }); 1953 } else { 1954 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1955 return VarEmission.getAllocatedAddress(); 1956 }); 1957 } 1958 ++I; 1959 } 1960 // Privatize extra loop counters used in loops for ordered(n) clauses. 
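// For example (illustrative only), with '#pragma omp for ordered(2)' on a loop
// nest where only one loop is collapsed, the counter of the second loop is
// still privatized here so that doacross dependences can refer to it.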
1961 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 1962 if (!C->getNumForLoops()) 1963 continue; 1964 for (unsigned I = S.getCollapsedNumber(), 1965 E = C->getLoopNumIterations().size(); 1966 I < E; ++I) { 1967 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 1968 const auto *VD = cast<VarDecl>(DRE->getDecl()); 1969 // Override only those variables that can be captured to avoid re-emission 1970 // of the variables declared within the loops. 1971 if (DRE->refersToEnclosingVariableOrCapture()) { 1972 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 1973 return CreateMemTemp(DRE->getType(), VD->getName()); 1974 }); 1975 } 1976 } 1977 } 1978 } 1979 1980 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 1981 const Expr *Cond, llvm::BasicBlock *TrueBlock, 1982 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 1983 if (!CGF.HaveInsertPoint()) 1984 return; 1985 { 1986 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 1987 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 1988 (void)PreCondScope.Privatize(); 1989 // Get initial values of real counters. 1990 for (const Expr *I : S.inits()) { 1991 CGF.EmitIgnoredExpr(I); 1992 } 1993 } 1994 // Create temp loop control variables with their init values to support 1995 // non-rectangular loops. 1996 CodeGenFunction::OMPMapVars PreCondVars; 1997 for (const Expr * E: S.dependent_counters()) { 1998 if (!E) 1999 continue; 2000 assert(!E->getType().getNonReferenceType()->isRecordType() && 2001 "dependent counter must not be an iterator."); 2002 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2003 Address CounterAddr = 2004 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2005 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2006 } 2007 (void)PreCondVars.apply(CGF); 2008 for (const Expr *E : S.dependent_inits()) { 2009 if (!E) 2010 continue; 2011 CGF.EmitIgnoredExpr(E); 2012 } 2013 // Check that loop is executed at least one time. 2014 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2015 PreCondVars.restore(CGF); 2016 } 2017 2018 void CodeGenFunction::EmitOMPLinearClause( 2019 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2020 if (!HaveInsertPoint()) 2021 return; 2022 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2023 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2024 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2025 for (const Expr *C : LoopDirective->counters()) { 2026 SIMDLCVs.insert( 2027 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2028 } 2029 } 2030 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2031 auto CurPrivate = C->privates().begin(); 2032 for (const Expr *E : C->varlists()) { 2033 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2034 const auto *PrivateVD = 2035 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2036 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2037 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 2038 // Emit private VarDecl with copy init. 2039 EmitVarDecl(*PrivateVD); 2040 return GetAddrOfLocalVar(PrivateVD); 2041 }); 2042 assert(IsRegistered && "linear var already registered as private"); 2043 // Silence the warning about unused variable. 
2044 (void)IsRegistered; 2045 } else { 2046 EmitVarDecl(*PrivateVD); 2047 } 2048 ++CurPrivate; 2049 } 2050 } 2051 } 2052 2053 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2054 const OMPExecutableDirective &D, 2055 bool IsMonotonic) { 2056 if (!CGF.HaveInsertPoint()) 2057 return; 2058 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2059 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2060 /*ignoreResult=*/true); 2061 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2062 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2063 // In presence of finite 'safelen', it may be unsafe to mark all 2064 // the memory instructions parallel, because loop-carried 2065 // dependences of 'safelen' iterations are possible. 2066 if (!IsMonotonic) 2067 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2068 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2069 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2070 /*ignoreResult=*/true); 2071 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2072 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2073 // In presence of finite 'safelen', it may be unsafe to mark all 2074 // the memory instructions parallel, because loop-carried 2075 // dependences of 'safelen' iterations are possible. 2076 CGF.LoopStack.setParallel(/*Enable=*/false); 2077 } 2078 } 2079 2080 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 2081 bool IsMonotonic) { 2082 // Walk clauses and process safelen/lastprivate. 2083 LoopStack.setParallel(!IsMonotonic); 2084 LoopStack.setVectorizeEnable(); 2085 emitSimdlenSafelenClause(*this, D, IsMonotonic); 2086 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2087 if (C->getKind() == OMPC_ORDER_concurrent) 2088 LoopStack.setParallel(/*Enable=*/true); 2089 if ((D.getDirectiveKind() == OMPD_simd || 2090 (getLangOpts().OpenMPSimd && 2091 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2092 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2093 [](const OMPReductionClause *C) { 2094 return C->getModifier() == OMPC_REDUCTION_inscan; 2095 })) 2096 // Disable parallel access in case of prefix sum. 2097 LoopStack.setParallel(/*Enable=*/false); 2098 } 2099 2100 void CodeGenFunction::EmitOMPSimdFinal( 2101 const OMPLoopDirective &D, 2102 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2103 if (!HaveInsertPoint()) 2104 return; 2105 llvm::BasicBlock *DoneBB = nullptr; 2106 auto IC = D.counters().begin(); 2107 auto IPC = D.private_counters().begin(); 2108 for (const Expr *F : D.finals()) { 2109 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 2110 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 2111 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 2112 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 2113 OrigVD->hasGlobalStorage() || CED) { 2114 if (!DoneBB) { 2115 if (llvm::Value *Cond = CondGen(*this)) { 2116 // If the first post-update expression is found, emit conditional 2117 // block if it was requested. 
2118 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 2119 DoneBB = createBasicBlock(".omp.final.done"); 2120 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2121 EmitBlock(ThenBB); 2122 } 2123 } 2124 Address OrigAddr = Address::invalid(); 2125 if (CED) { 2126 OrigAddr = 2127 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 2128 } else { 2129 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 2130 /*RefersToEnclosingVariableOrCapture=*/false, 2131 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 2132 OrigAddr = EmitLValue(&DRE).getAddress(*this); 2133 } 2134 OMPPrivateScope VarScope(*this); 2135 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2136 (void)VarScope.Privatize(); 2137 EmitIgnoredExpr(F); 2138 } 2139 ++IC; 2140 ++IPC; 2141 } 2142 if (DoneBB) 2143 EmitBlock(DoneBB, /*IsFinished=*/true); 2144 } 2145 2146 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 2147 const OMPLoopDirective &S, 2148 CodeGenFunction::JumpDest LoopExit) { 2149 CGF.EmitOMPLoopBody(S, LoopExit); 2150 CGF.EmitStopPoint(&S); 2151 } 2152 2153 /// Emit a helper variable and return corresponding lvalue. 2154 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2155 const DeclRefExpr *Helper) { 2156 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2157 CGF.EmitVarDecl(*VDecl); 2158 return CGF.EmitLValue(Helper); 2159 } 2160 2161 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2162 const RegionCodeGenTy &SimdInitGen, 2163 const RegionCodeGenTy &BodyCodeGen) { 2164 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2165 PrePostActionTy &) { 2166 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2167 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2168 SimdInitGen(CGF); 2169 2170 BodyCodeGen(CGF); 2171 }; 2172 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2173 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2174 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2175 2176 BodyCodeGen(CGF); 2177 }; 2178 const Expr *IfCond = nullptr; 2179 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2180 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2181 if (CGF.getLangOpts().OpenMP >= 50 && 2182 (C->getNameModifier() == OMPD_unknown || 2183 C->getNameModifier() == OMPD_simd)) { 2184 IfCond = C->getCondition(); 2185 break; 2186 } 2187 } 2188 } 2189 if (IfCond) { 2190 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2191 } else { 2192 RegionCodeGenTy ThenRCG(ThenGen); 2193 ThenRCG(CGF); 2194 } 2195 } 2196 2197 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2198 PrePostActionTy &Action) { 2199 Action.Enter(CGF); 2200 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2201 "Expected simd directive"); 2202 OMPLoopScope PreInitScope(CGF, S); 2203 // if (PreCond) { 2204 // for (IV in 0..LastIteration) BODY; 2205 // <Final counter/linear vars updates>; 2206 // } 2207 // 2208 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2209 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2210 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2211 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2212 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2213 } 2214 2215 // Emit: if (PreCond) - begin. 2216 // If the condition constant folds and can be elided, avoid emitting the 2217 // whole loop. 
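// For example (illustrative only), a zero-trip loop such as
//   for (int i = 0; i < 0; ++i) ...
// has a precondition that folds to false, so nothing is emitted for it.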
2218 bool CondConstant; 2219 llvm::BasicBlock *ContBlock = nullptr; 2220 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2221 if (!CondConstant) 2222 return; 2223 } else { 2224 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2225 ContBlock = CGF.createBasicBlock("simd.if.end"); 2226 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2227 CGF.getProfileCount(&S)); 2228 CGF.EmitBlock(ThenBlock); 2229 CGF.incrementProfileCounter(&S); 2230 } 2231 2232 // Emit the loop iteration variable. 2233 const Expr *IVExpr = S.getIterationVariable(); 2234 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2235 CGF.EmitVarDecl(*IVDecl); 2236 CGF.EmitIgnoredExpr(S.getInit()); 2237 2238 // Emit the iterations count variable. 2239 // If it is not a variable, Sema decided to calculate iterations count on 2240 // each iteration (e.g., it is foldable into a constant). 2241 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2242 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2243 // Emit calculation of the iterations count. 2244 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2245 } 2246 2247 emitAlignedClause(CGF, S); 2248 (void)CGF.EmitOMPLinearClauseInit(S); 2249 { 2250 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2251 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2252 CGF.EmitOMPLinearClause(S, LoopScope); 2253 CGF.EmitOMPPrivateClause(S, LoopScope); 2254 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2255 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2256 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2257 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2258 (void)LoopScope.Privatize(); 2259 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2260 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2261 2262 emitCommonSimdLoop( 2263 CGF, S, 2264 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2265 CGF.EmitOMPSimdInit(S); 2266 }, 2267 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2268 CGF.EmitOMPInnerLoop( 2269 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2270 [&S](CodeGenFunction &CGF) { 2271 emitOMPLoopBodyWithStopPoint(CGF, S, 2272 CodeGenFunction::JumpDest()); 2273 }, 2274 [](CodeGenFunction &) {}); 2275 }); 2276 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2277 // Emit final copy of the lastprivate variables at the end of loops. 2278 if (HasLastprivateClause) 2279 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2280 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2281 emitPostUpdateForReductionClause(CGF, S, 2282 [](CodeGenFunction &) { return nullptr; }); 2283 } 2284 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2285 // Emit: if (PreCond) - end. 2286 if (ContBlock) { 2287 CGF.EmitBranch(ContBlock); 2288 CGF.EmitBlock(ContBlock, true); 2289 } 2290 } 2291 2292 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2293 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2294 OMPFirstScanLoop = true; 2295 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2296 emitOMPSimdRegion(CGF, S, Action); 2297 }; 2298 { 2299 auto LPCRegion = 2300 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2301 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2302 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2303 } 2304 // Check for outer lastprivate conditional update. 
2305 checkForLastprivateConditionalUpdate(*this, S); 2306 } 2307 2308 void CodeGenFunction::EmitOMPOuterLoop( 2309 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2310 CodeGenFunction::OMPPrivateScope &LoopScope, 2311 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2312 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2313 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2314 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2315 2316 const Expr *IVExpr = S.getIterationVariable(); 2317 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2318 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2319 2320 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2321 2322 // Start the loop with a block that tests the condition. 2323 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2324 EmitBlock(CondBlock); 2325 const SourceRange R = S.getSourceRange(); 2326 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2327 SourceLocToDebugLoc(R.getEnd())); 2328 2329 llvm::Value *BoolCondVal = nullptr; 2330 if (!DynamicOrOrdered) { 2331 // UB = min(UB, GlobalUB) or 2332 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2333 // 'distribute parallel for') 2334 EmitIgnoredExpr(LoopArgs.EUB); 2335 // IV = LB 2336 EmitIgnoredExpr(LoopArgs.Init); 2337 // IV < UB 2338 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2339 } else { 2340 BoolCondVal = 2341 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2342 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2343 } 2344 2345 // If there are any cleanups between here and the loop-exit scope, 2346 // create a block to stage a loop exit along. 2347 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2348 if (LoopScope.requiresCleanups()) 2349 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2350 2351 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2352 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2353 if (ExitBlock != LoopExit.getBlock()) { 2354 EmitBlock(ExitBlock); 2355 EmitBranchThroughCleanup(LoopExit); 2356 } 2357 EmitBlock(LoopBody); 2358 2359 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2360 // LB for loop condition and emitted it above). 2361 if (DynamicOrOrdered) 2362 EmitIgnoredExpr(LoopArgs.Init); 2363 2364 // Create a block for the increment. 2365 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2366 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2367 2368 emitCommonSimdLoop( 2369 *this, S, 2370 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2371 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2372 // with dynamic/guided scheduling and without ordered clause. 2373 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2374 CGF.LoopStack.setParallel(!IsMonotonic); 2375 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2376 if (C->getKind() == OMPC_ORDER_concurrent) 2377 CGF.LoopStack.setParallel(/*Enable=*/true); 2378 } else { 2379 CGF.EmitOMPSimdInit(S, IsMonotonic); 2380 } 2381 }, 2382 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2383 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2384 SourceLocation Loc = S.getBeginLoc(); 2385 // when 'distribute' is not combined with a 'for': 2386 // while (idx <= UB) { BODY; ++idx; } 2387 // when 'distribute' is combined with a 'for' 2388 // (e.g. 
'distribute parallel for') 2389 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2390 CGF.EmitOMPInnerLoop( 2391 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2392 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2393 CodeGenLoop(CGF, S, LoopExit); 2394 }, 2395 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2396 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2397 }); 2398 }); 2399 2400 EmitBlock(Continue.getBlock()); 2401 BreakContinueStack.pop_back(); 2402 if (!DynamicOrOrdered) { 2403 // Emit "LB = LB + Stride", "UB = UB + Stride". 2404 EmitIgnoredExpr(LoopArgs.NextLB); 2405 EmitIgnoredExpr(LoopArgs.NextUB); 2406 } 2407 2408 EmitBranch(CondBlock); 2409 LoopStack.pop(); 2410 // Emit the fall-through block. 2411 EmitBlock(LoopExit.getBlock()); 2412 2413 // Tell the runtime we are done. 2414 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2415 if (!DynamicOrOrdered) 2416 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2417 S.getDirectiveKind()); 2418 }; 2419 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2420 } 2421 2422 void CodeGenFunction::EmitOMPForOuterLoop( 2423 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2424 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2425 const OMPLoopArguments &LoopArgs, 2426 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2427 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2428 2429 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2430 const bool DynamicOrOrdered = 2431 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2432 2433 assert((Ordered || 2434 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2435 LoopArgs.Chunk != nullptr)) && 2436 "static non-chunked schedule does not need outer loop"); 2437 2438 // Emit outer loop. 2439 // 2440 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2441 // When schedule(dynamic,chunk_size) is specified, the iterations are 2442 // distributed to threads in the team in chunks as the threads request them. 2443 // Each thread executes a chunk of iterations, then requests another chunk, 2444 // until no chunks remain to be distributed. Each chunk contains chunk_size 2445 // iterations, except for the last chunk to be distributed, which may have 2446 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2447 // 2448 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2449 // to threads in the team in chunks as the executing threads request them. 2450 // Each thread executes a chunk of iterations, then requests another chunk, 2451 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2452 // each chunk is proportional to the number of unassigned iterations divided 2453 // by the number of threads in the team, decreasing to 1. For a chunk_size 2454 // with value k (greater than 1), the size of each chunk is determined in the 2455 // same way, with the restriction that the chunks do not contain fewer than k 2456 // iterations (except for the last chunk to be assigned, which may have fewer 2457 // than k iterations). 2458 // 2459 // When schedule(auto) is specified, the decision regarding scheduling is 2460 // delegated to the compiler and/or runtime system. The programmer gives the 2461 // implementation the freedom to choose any possible mapping of iterations to 2462 // threads in the team. 
2463 // 2464 // When schedule(runtime) is specified, the decision regarding scheduling is 2465 // deferred until run time, and the schedule and chunk size are taken from the 2466 // run-sched-var ICV. If the ICV is set to auto, the schedule is 2467 // implementation defined 2468 // 2469 // while(__kmpc_dispatch_next(&LB, &UB)) { 2470 // idx = LB; 2471 // while (idx <= UB) { BODY; ++idx; 2472 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 2473 // } // inner loop 2474 // } 2475 // 2476 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2477 // When schedule(static, chunk_size) is specified, iterations are divided into 2478 // chunks of size chunk_size, and the chunks are assigned to the threads in 2479 // the team in a round-robin fashion in the order of the thread number. 2480 // 2481 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 2482 // while (idx <= UB) { BODY; ++idx; } // inner loop 2483 // LB = LB + ST; 2484 // UB = UB + ST; 2485 // } 2486 // 2487 2488 const Expr *IVExpr = S.getIterationVariable(); 2489 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2490 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2491 2492 if (DynamicOrOrdered) { 2493 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds = 2494 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 2495 llvm::Value *LBVal = DispatchBounds.first; 2496 llvm::Value *UBVal = DispatchBounds.second; 2497 CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal, 2498 LoopArgs.Chunk}; 2499 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize, 2500 IVSigned, Ordered, DipatchRTInputValues); 2501 } else { 2502 CGOpenMPRuntime::StaticRTInput StaticInit( 2503 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 2504 LoopArgs.ST, LoopArgs.Chunk); 2505 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(), 2506 ScheduleKind, StaticInit); 2507 } 2508 2509 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 2510 const unsigned IVSize, 2511 const bool IVSigned) { 2512 if (Ordered) { 2513 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 2514 IVSigned); 2515 } 2516 }; 2517 2518 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 2519 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 2520 OuterLoopArgs.IncExpr = S.getInc(); 2521 OuterLoopArgs.Init = S.getInit(); 2522 OuterLoopArgs.Cond = S.getCond(); 2523 OuterLoopArgs.NextLB = S.getNextLowerBound(); 2524 OuterLoopArgs.NextUB = S.getNextUpperBound(); 2525 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 2526 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 2527 } 2528 2529 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, 2530 const unsigned IVSize, const bool IVSigned) {} 2531 2532 void CodeGenFunction::EmitOMPDistributeOuterLoop( 2533 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 2534 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 2535 const CodeGenLoopTy &CodeGenLoopContent) { 2536 2537 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2538 2539 // Emit outer loop. 
2540 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
2541 // dynamic.
2542 //
2543
2544 const Expr *IVExpr = S.getIterationVariable();
2545 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2546 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2547
2548 CGOpenMPRuntime::StaticRTInput StaticInit(
2549 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
2550 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
2551 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
2552
2553 // For combined 'distribute' and 'for', the increment expression of distribute
2554 // is stored in DistInc. For 'distribute' alone, it is in Inc.
2555 Expr *IncExpr;
2556 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
2557 IncExpr = S.getDistInc();
2558 else
2559 IncExpr = S.getInc();
2560
2561 // This routine is shared by 'omp distribute parallel for' and
2562 // 'omp distribute': select the right EUB expression depending on the
2563 // directive.
2564 OMPLoopArguments OuterLoopArgs;
2565 OuterLoopArgs.LB = LoopArgs.LB;
2566 OuterLoopArgs.UB = LoopArgs.UB;
2567 OuterLoopArgs.ST = LoopArgs.ST;
2568 OuterLoopArgs.IL = LoopArgs.IL;
2569 OuterLoopArgs.Chunk = LoopArgs.Chunk;
2570 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2571 ? S.getCombinedEnsureUpperBound()
2572 : S.getEnsureUpperBound();
2573 OuterLoopArgs.IncExpr = IncExpr;
2574 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2575 ? S.getCombinedInit()
2576 : S.getInit();
2577 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2578 ? S.getCombinedCond()
2579 : S.getCond();
2580 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2581 ? S.getCombinedNextLowerBound()
2582 : S.getNextLowerBound();
2583 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
2584 ? S.getCombinedNextUpperBound()
2585 : S.getNextUpperBound();
2586
2587 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
2588 LoopScope, OuterLoopArgs, CodeGenLoopContent,
2589 emitEmptyOrdered);
2590 }
2591
2592 static std::pair<LValue, LValue>
2593 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
2594 const OMPExecutableDirective &S) {
2595 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
2596 LValue LB =
2597 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
2598 LValue UB =
2599 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
2600
2601 // When composing 'distribute' with 'for' (e.g. as in 'distribute
2602 // parallel for') we need to use the 'distribute'
2603 // chunk lower and upper bounds rather than the whole loop iteration
2604 // space. These are parameters to the outlined function for 'parallel'
2605 // and we copy the bounds of the previous schedule into the
2606 // current ones.
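// In pseudocode (illustrative only):
//   LB = (IterTy)PrevLB;
//   UB = (IterTy)PrevUB;
// where PrevLB/PrevUB are the chunk bounds computed by the enclosing
// 'distribute' and IterTy is the type of the loop iteration variable.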
2607 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); 2608 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); 2609 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar( 2610 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc()); 2611 PrevLBVal = CGF.EmitScalarConversion( 2612 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), 2613 LS.getIterationVariable()->getType(), 2614 LS.getPrevLowerBoundVariable()->getExprLoc()); 2615 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar( 2616 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc()); 2617 PrevUBVal = CGF.EmitScalarConversion( 2618 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), 2619 LS.getIterationVariable()->getType(), 2620 LS.getPrevUpperBoundVariable()->getExprLoc()); 2621 2622 CGF.EmitStoreOfScalar(PrevLBVal, LB); 2623 CGF.EmitStoreOfScalar(PrevUBVal, UB); 2624 2625 return {LB, UB}; 2626 } 2627 2628 /// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then 2629 /// we need to use the LB and UB expressions generated by the worksharing 2630 /// code generation support, whereas in non combined situations we would 2631 /// just emit 0 and the LastIteration expression 2632 /// This function is necessary due to the difference of the LB and UB 2633 /// types for the RT emission routines for 'for_static_init' and 2634 /// 'for_dispatch_init' 2635 static std::pair<llvm::Value *, llvm::Value *> 2636 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, 2637 const OMPExecutableDirective &S, 2638 Address LB, Address UB) { 2639 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2640 const Expr *IVExpr = LS.getIterationVariable(); 2641 // when implementing a dynamic schedule for a 'for' combined with a 2642 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop 2643 // is not normalized as each team only executes its own assigned 2644 // distribute chunk 2645 QualType IteratorTy = IVExpr->getType(); 2646 llvm::Value *LBVal = 2647 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2648 llvm::Value *UBVal = 2649 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2650 return {LBVal, UBVal}; 2651 } 2652 2653 static void emitDistributeParallelForDistributeInnerBoundParams( 2654 CodeGenFunction &CGF, const OMPExecutableDirective &S, 2655 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) { 2656 const auto &Dir = cast<OMPLoopDirective>(S); 2657 LValue LB = 2658 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable())); 2659 llvm::Value *LBCast = 2660 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)), 2661 CGF.SizeTy, /*isSigned=*/false); 2662 CapturedVars.push_back(LBCast); 2663 LValue UB = 2664 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable())); 2665 2666 llvm::Value *UBCast = 2667 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)), 2668 CGF.SizeTy, /*isSigned=*/false); 2669 CapturedVars.push_back(UBCast); 2670 } 2671 2672 static void 2673 emitInnerParallelForWhenCombined(CodeGenFunction &CGF, 2674 const OMPLoopDirective &S, 2675 CodeGenFunction::JumpDest LoopExit) { 2676 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, 2677 PrePostActionTy &Action) { 2678 Action.Enter(CGF); 2679 bool HasCancel = false; 2680 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2681 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S)) 2682 HasCancel = D->hasCancel(); 2683 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S)) 2684 HasCancel = 
D->hasCancel(); 2685 else if (const auto *D = 2686 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2687 HasCancel = D->hasCancel(); 2688 } 2689 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2690 HasCancel); 2691 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2692 emitDistributeParallelForInnerBounds, 2693 emitDistributeParallelForDispatchBounds); 2694 }; 2695 2696 emitCommonOMPParallelDirective( 2697 CGF, S, 2698 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2699 CGInlinedWorksharingLoop, 2700 emitDistributeParallelForDistributeInnerBoundParams); 2701 } 2702 2703 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2704 const OMPDistributeParallelForDirective &S) { 2705 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2706 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2707 S.getDistInc()); 2708 }; 2709 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2710 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2711 } 2712 2713 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2714 const OMPDistributeParallelForSimdDirective &S) { 2715 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2716 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2717 S.getDistInc()); 2718 }; 2719 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2720 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2721 } 2722 2723 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2724 const OMPDistributeSimdDirective &S) { 2725 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2726 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2727 }; 2728 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2729 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2730 } 2731 2732 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2733 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2734 // Emit SPMD target parallel for region as a standalone region. 2735 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2736 emitOMPSimdRegion(CGF, S, Action); 2737 }; 2738 llvm::Function *Fn; 2739 llvm::Constant *Addr; 2740 // Emit target region as a standalone region. 2741 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 2742 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 2743 assert(Fn && Addr && "Target device function emission failed."); 2744 } 2745 2746 void CodeGenFunction::EmitOMPTargetSimdDirective( 2747 const OMPTargetSimdDirective &S) { 2748 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2749 emitOMPSimdRegion(CGF, S, Action); 2750 }; 2751 emitCommonOMPTargetDirective(*this, S, CodeGen); 2752 } 2753 2754 namespace { 2755 struct ScheduleKindModifiersTy { 2756 OpenMPScheduleClauseKind Kind; 2757 OpenMPScheduleClauseModifier M1; 2758 OpenMPScheduleClauseModifier M2; 2759 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2760 OpenMPScheduleClauseModifier M1, 2761 OpenMPScheduleClauseModifier M2) 2762 : Kind(Kind), M1(M1), M2(M2) {} 2763 }; 2764 } // namespace 2765 2766 bool CodeGenFunction::EmitOMPWorksharingLoop( 2767 const OMPLoopDirective &S, Expr *EUB, 2768 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2769 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2770 // Emit the loop iteration variable. 
2771 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2772 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2773 EmitVarDecl(*IVDecl); 2774 2775 // Emit the iterations count variable. 2776 // If it is not a variable, Sema decided to calculate iterations count on each 2777 // iteration (e.g., it is foldable into a constant). 2778 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2779 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2780 // Emit calculation of the iterations count. 2781 EmitIgnoredExpr(S.getCalcLastIteration()); 2782 } 2783 2784 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2785 2786 bool HasLastprivateClause; 2787 // Check pre-condition. 2788 { 2789 OMPLoopScope PreInitScope(*this, S); 2790 // Skip the entire loop if we don't meet the precondition. 2791 // If the condition constant folds and can be elided, avoid emitting the 2792 // whole loop. 2793 bool CondConstant; 2794 llvm::BasicBlock *ContBlock = nullptr; 2795 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2796 if (!CondConstant) 2797 return false; 2798 } else { 2799 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2800 ContBlock = createBasicBlock("omp.precond.end"); 2801 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2802 getProfileCount(&S)); 2803 EmitBlock(ThenBlock); 2804 incrementProfileCounter(&S); 2805 } 2806 2807 RunCleanupsScope DoacrossCleanupScope(*this); 2808 bool Ordered = false; 2809 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2810 if (OrderedClause->getNumForLoops()) 2811 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2812 else 2813 Ordered = true; 2814 } 2815 2816 llvm::DenseSet<const Expr *> EmittedFinals; 2817 emitAlignedClause(*this, S); 2818 bool HasLinears = EmitOMPLinearClauseInit(S); 2819 // Emit helper vars inits. 2820 2821 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2822 LValue LB = Bounds.first; 2823 LValue UB = Bounds.second; 2824 LValue ST = 2825 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2826 LValue IL = 2827 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2828 2829 // Emit 'then' code. 2830 { 2831 OMPPrivateScope LoopScope(*this); 2832 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2833 // Emit implicit barrier to synchronize threads and avoid data races on 2834 // initialization of firstprivate variables and post-update of 2835 // lastprivate variables. 2836 CGM.getOpenMPRuntime().emitBarrierCall( 2837 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2838 /*ForceSimpleCall=*/true); 2839 } 2840 EmitOMPPrivateClause(S, LoopScope); 2841 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2842 *this, S, EmitLValue(S.getIterationVariable())); 2843 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2844 EmitOMPReductionClauseInit(S, LoopScope); 2845 EmitOMPPrivateLoopCounters(S, LoopScope); 2846 EmitOMPLinearClause(S, LoopScope); 2847 (void)LoopScope.Privatize(); 2848 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2849 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2850 2851 // Detect the loop schedule kind and chunk. 
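// For example (illustrative only), '#pragma omp for schedule(dynamic, 4)' sets
// ScheduleKind.Schedule to OMPC_SCHEDULE_dynamic and ChunkExpr to the
// expression '4'; with no schedule clause the runtime's default is queried.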
2852 const Expr *ChunkExpr = nullptr;
2853 OpenMPScheduleTy ScheduleKind;
2854 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
2855 ScheduleKind.Schedule = C->getScheduleKind();
2856 ScheduleKind.M1 = C->getFirstScheduleModifier();
2857 ScheduleKind.M2 = C->getSecondScheduleModifier();
2858 ChunkExpr = C->getChunkSize();
2859 } else {
2860 // Default behaviour for schedule clause.
2861 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
2862 *this, S, ScheduleKind.Schedule, ChunkExpr);
2863 }
2864 bool HasChunkSizeOne = false;
2865 llvm::Value *Chunk = nullptr;
2866 if (ChunkExpr) {
2867 Chunk = EmitScalarExpr(ChunkExpr);
2868 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
2869 S.getIterationVariable()->getType(),
2870 S.getBeginLoc());
2871 Expr::EvalResult Result;
2872 if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
2873 llvm::APSInt EvaluatedChunk = Result.Val.getInt();
2874 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
2875 }
2876 }
2877 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2878 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2879 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2880 // If the static schedule kind is specified or if the ordered clause is
2881 // specified, and if no monotonic modifier is specified, the effect will
2882 // be as if the monotonic modifier was specified.
2883 bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
2884 /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
2885 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
2886 bool IsMonotonic =
2887 Ordered ||
2888 ((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2889 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
2890 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
2891 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
2892 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2893 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2894 if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
2895 /* Chunked */ Chunk != nullptr) ||
2896 StaticChunkedOne) &&
2897 !Ordered) {
2898 JumpDest LoopExit =
2899 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2900 emitCommonSimdLoop(
2901 *this, S,
2902 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
2903 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2904 CGF.EmitOMPSimdInit(S, IsMonotonic);
2905 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
2906 if (C->getKind() == OMPC_ORDER_concurrent)
2907 CGF.LoopStack.setParallel(/*Enable=*/true);
2908 }
2909 },
2910 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
2911 &S, ScheduleKind, LoopExit,
2912 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2913 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2914 // When no chunk_size is specified, the iteration space is divided
2915 // into chunks that are approximately equal in size, and at most
2916 // one chunk is distributed to each thread. Note that the size of
2917 // the chunks is unspecified in this case.
2918 CGOpenMPRuntime::StaticRTInput StaticInit(
2919 IVSize, IVSigned, Ordered, IL.getAddress(CGF),
2920 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
2921 StaticChunkedOne ?
Chunk : nullptr); 2922 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2923 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 2924 StaticInit); 2925 // UB = min(UB, GlobalUB); 2926 if (!StaticChunkedOne) 2927 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 2928 // IV = LB; 2929 CGF.EmitIgnoredExpr(S.getInit()); 2930 // For unchunked static schedule generate: 2931 // 2932 // while (idx <= UB) { 2933 // BODY; 2934 // ++idx; 2935 // } 2936 // 2937 // For static schedule with chunk one: 2938 // 2939 // while (IV <= PrevUB) { 2940 // BODY; 2941 // IV += ST; 2942 // } 2943 CGF.EmitOMPInnerLoop( 2944 S, LoopScope.requiresCleanups(), 2945 StaticChunkedOne ? S.getCombinedParForInDistCond() 2946 : S.getCond(), 2947 StaticChunkedOne ? S.getDistInc() : S.getInc(), 2948 [&S, LoopExit](CodeGenFunction &CGF) { 2949 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 2950 }, 2951 [](CodeGenFunction &) {}); 2952 }); 2953 EmitBlock(LoopExit.getBlock()); 2954 // Tell the runtime we are done. 2955 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2956 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2957 S.getDirectiveKind()); 2958 }; 2959 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2960 } else { 2961 // Emit the outer loop, which requests its work chunk [LB..UB] from 2962 // runtime and runs the inner loop to process it. 2963 const OMPLoopArguments LoopArguments( 2964 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 2965 IL.getAddress(*this), Chunk, EUB); 2966 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2967 LoopArguments, CGDispatchBounds); 2968 } 2969 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2970 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 2971 return CGF.Builder.CreateIsNotNull( 2972 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2973 }); 2974 } 2975 EmitOMPReductionClauseFinal( 2976 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2977 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2978 : /*Parallel only*/ OMPD_parallel); 2979 // Emit post-update of the reduction variables if IsLastIter != 0. 2980 emitPostUpdateForReductionClause( 2981 *this, S, [IL, &S](CodeGenFunction &CGF) { 2982 return CGF.Builder.CreateIsNotNull( 2983 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2984 }); 2985 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2986 if (HasLastprivateClause) 2987 EmitOMPLastprivateClauseFinal( 2988 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2989 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 2990 } 2991 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 2992 return CGF.Builder.CreateIsNotNull( 2993 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2994 }); 2995 DoacrossCleanupScope.ForceCleanup(); 2996 // We're now done with the loop, so jump to the continuation block. 2997 if (ContBlock) { 2998 EmitBranch(ContBlock); 2999 EmitBlock(ContBlock, /*IsFinished=*/true); 3000 } 3001 } 3002 return HasLastprivateClause; 3003 } 3004 3005 /// The following two functions generate expressions for the loop lower 3006 /// and upper bounds in case of static and dynamic (dispatch) schedule 3007 /// of the associated 'for' or 'distribute' loop. 
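/// For the static path the bounds are the '.omp.lb' and '.omp.ub' helper
/// variables created by Sema; for dispatch schedules the bounds are simply
/// the constant 0 and the precomputed iteration count
/// (see emitDispatchForLoopBounds below).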
3008 static std::pair<LValue, LValue>
3009 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3010 const auto &LS = cast<OMPLoopDirective>(S);
3011 LValue LB =
3012 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3013 LValue UB =
3014 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3015 return {LB, UB};
3016 }
3017
3018 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3019 /// consider the lower and upper bound expressions generated by the
3020 /// worksharing loop support, but we use 0 and the iteration space size as
3021 /// constants
3022 static std::pair<llvm::Value *, llvm::Value *>
3023 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3024 Address LB, Address UB) {
3025 const auto &LS = cast<OMPLoopDirective>(S);
3026 const Expr *IVExpr = LS.getIterationVariable();
3027 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3028 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3029 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3030 return {LBVal, UBVal};
3031 }
3032
3033 /// Emits the code for the directive with inscan reductions.
3034 /// The code is the following:
3035 /// \code
3036 /// size num_iters = <num_iters>;
3037 /// <type> buffer[num_iters];
3038 /// #pragma omp ...
3039 /// for (i: 0..<num_iters>) {
3040 /// <input phase>;
3041 /// buffer[i] = red;
3042 /// }
3043 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3044 /// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3045 /// buffer[cnt] op= buffer[cnt-pow(2,k)];
3046 /// #pragma omp ...
3047 /// for (0..<num_iters>) {
3048 /// red = InclusiveScan ? buffer[i] : buffer[i-1];
3049 /// <scan phase>;
3050 /// }
3051 /// \endcode
3052 static void emitScanBasedDirective(
3053 CodeGenFunction &CGF, const OMPLoopDirective &S,
3054 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3055 llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3056 llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3057 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3058 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3059 SmallVector<const Expr *, 4> Shareds;
3060 SmallVector<const Expr *, 4> Privates;
3061 SmallVector<const Expr *, 4> ReductionOps;
3062 SmallVector<const Expr *, 4> LHSs;
3063 SmallVector<const Expr *, 4> RHSs;
3064 SmallVector<const Expr *, 4> CopyOps;
3065 SmallVector<const Expr *, 4> CopyArrayTemps;
3066 SmallVector<const Expr *, 4> CopyArrayElems;
3067 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3068 assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3069 "Only inscan reductions are expected.");
3070 Shareds.append(C->varlist_begin(), C->varlist_end());
3071 Privates.append(C->privates().begin(), C->privates().end());
3072 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3073 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3074 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3075 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3076 CopyArrayTemps.append(C->copy_array_temps().begin(),
3077 C->copy_array_temps().end());
3078 CopyArrayElems.append(C->copy_array_elems().begin(),
3079 C->copy_array_elems().end());
3080 }
3081 {
3082 // Emit buffers for each reduction variable.
3083 // ReductionCodeGen is required to emit correctly the code for array
3084 // reductions.
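    // Each buffer is a variable-length array with one element per loop
    // iteration; its size expression is bound to OMPScanNumIterations through
    // the OpaqueValueMapping below before the temporary is emitted.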
3085 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps); 3086 unsigned Count = 0; 3087 auto *ITA = CopyArrayTemps.begin(); 3088 for (const Expr *IRef : Privates) { 3089 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl()); 3090 // Emit variably modified arrays, used for arrays/array sections 3091 // reductions. 3092 if (PrivateVD->getType()->isVariablyModifiedType()) { 3093 RedCG.emitSharedOrigLValue(CGF, Count); 3094 RedCG.emitAggregateType(CGF, Count); 3095 } 3096 CodeGenFunction::OpaqueValueMapping DimMapping( 3097 CGF, 3098 cast<OpaqueValueExpr>( 3099 cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe()) 3100 ->getSizeExpr()), 3101 RValue::get(OMPScanNumIterations)); 3102 // Emit temp buffer. 3103 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl())); 3104 ++ITA; 3105 ++Count; 3106 } 3107 } 3108 CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S); 3109 { 3110 // Emit loop with input phase: 3111 // #pragma omp ... 3112 // for (i: 0..<num_iters>) { 3113 // <input phase>; 3114 // buffer[i] = red; 3115 // } 3116 CGF.OMPFirstScanLoop = true; 3117 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3118 FirstGen(CGF); 3119 } 3120 // Emit prefix reduction: 3121 // for (int k = 0; k <= ceil(log2(n)); ++k) 3122 llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock(); 3123 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body"); 3124 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit"); 3125 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy); 3126 llvm::Value *Arg = 3127 CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy); 3128 llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg); 3129 F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy); 3130 LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal); 3131 LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy); 3132 llvm::Value *NMin1 = CGF.Builder.CreateNUWSub( 3133 OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3134 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc()); 3135 CGF.EmitBlock(LoopBB); 3136 auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2); 3137 // size pow2k = 1; 3138 auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3139 Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB); 3140 Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB); 3141 // for (size i = n - 1; i >= 2 ^ k; --i) 3142 // tmp[i] op= tmp[i-pow2k]; 3143 llvm::BasicBlock *InnerLoopBB = 3144 CGF.createBasicBlock("omp.inner.log.scan.body"); 3145 llvm::BasicBlock *InnerExitBB = 3146 CGF.createBasicBlock("omp.inner.log.scan.exit"); 3147 llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K); 3148 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3149 CGF.EmitBlock(InnerLoopBB); 3150 auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2); 3151 IVal->addIncoming(NMin1, LoopBB); 3152 { 3153 CodeGenFunction::OMPPrivateScope PrivScope(CGF); 3154 auto *ILHS = LHSs.begin(); 3155 auto *IRHS = RHSs.begin(); 3156 for (const Expr *CopyArrayElem : CopyArrayElems) { 3157 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 3158 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 3159 Address LHSAddr = Address::invalid(); 3160 { 3161 CodeGenFunction::OpaqueValueMapping IdxMapping( 3162 CGF, 3163 cast<OpaqueValueExpr>( 3164 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3165 RValue::get(IVal)); 3166 LHSAddr = 
CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3167 } 3168 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3169 Address RHSAddr = Address::invalid(); 3170 { 3171 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3172 CodeGenFunction::OpaqueValueMapping IdxMapping( 3173 CGF, 3174 cast<OpaqueValueExpr>( 3175 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3176 RValue::get(OffsetIVal)); 3177 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3178 } 3179 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3180 ++ILHS; 3181 ++IRHS; 3182 } 3183 PrivScope.Privatize(); 3184 CGF.CGM.getOpenMPRuntime().emitReduction( 3185 CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 3186 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3187 } 3188 llvm::Value *NextIVal = 3189 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3190 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3191 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3192 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3193 CGF.EmitBlock(InnerExitBB); 3194 llvm::Value *Next = 3195 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3196 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3197 // pow2k <<= 1; 3198 llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3199 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3200 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3201 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3202 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3203 CGF.EmitBlock(ExitBB); 3204 3205 CGF.OMPFirstScanLoop = false; 3206 SecondGen(CGF); 3207 } 3208 3209 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3210 const OMPLoopDirective &S, 3211 bool HasCancel) { 3212 bool HasLastprivates; 3213 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3214 [](const OMPReductionClause *C) { 3215 return C->getModifier() == OMPC_REDUCTION_inscan; 3216 })) { 3217 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3218 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3219 OMPLoopScope LoopScope(CGF, S); 3220 return CGF.EmitScalarExpr(S.getNumIterations()); 3221 }; 3222 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3223 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3224 CGF, S.getDirectiveKind(), HasCancel); 3225 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3226 emitForLoopBounds, 3227 emitDispatchForLoopBounds); 3228 // Emit an implicit barrier at the end. 
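      // The buffer[i] stores performed by this first loop must be visible to
      // all threads before the cross-iteration reduction pass in
      // emitScanBasedDirective reads them, hence the explicit 'for' barrier.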
3229 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3230 OMPD_for); 3231 }; 3232 const auto &&SecondGen = [&S, HasCancel, 3233 &HasLastprivates](CodeGenFunction &CGF) { 3234 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3235 CGF, S.getDirectiveKind(), HasCancel); 3236 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3237 emitForLoopBounds, 3238 emitDispatchForLoopBounds); 3239 }; 3240 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3241 } else { 3242 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3243 HasCancel); 3244 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3245 emitForLoopBounds, 3246 emitDispatchForLoopBounds); 3247 } 3248 return HasLastprivates; 3249 } 3250 3251 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3252 bool HasLastprivates = false; 3253 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3254 PrePostActionTy &) { 3255 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3256 }; 3257 { 3258 auto LPCRegion = 3259 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3260 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3261 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3262 S.hasCancel()); 3263 } 3264 3265 // Emit an implicit barrier at the end. 3266 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3267 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3268 // Check for outer lastprivate conditional update. 3269 checkForLastprivateConditionalUpdate(*this, S); 3270 } 3271 3272 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3273 bool HasLastprivates = false; 3274 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3275 PrePostActionTy &) { 3276 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3277 }; 3278 { 3279 auto LPCRegion = 3280 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3281 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3282 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3283 } 3284 3285 // Emit an implicit barrier at the end. 3286 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3287 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3288 // Check for outer lastprivate conditional update. 3289 checkForLastprivateConditionalUpdate(*this, S); 3290 } 3291 3292 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3293 const Twine &Name, 3294 llvm::Value *Init = nullptr) { 3295 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3296 if (Init) 3297 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3298 return LVal; 3299 } 3300 3301 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3302 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3303 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3304 bool HasLastprivates = false; 3305 auto &&CodeGen = [&S, CapturedStmt, CS, 3306 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3307 const ASTContext &C = CGF.getContext(); 3308 QualType KmpInt32Ty = 3309 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3310 // Emit helper vars inits. 3311 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3312 CGF.Builder.getInt32(0)); 3313 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3314 ? 
CGF.Builder.getInt32(CS->size() - 1) 3315 : CGF.Builder.getInt32(0); 3316 LValue UB = 3317 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3318 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3319 CGF.Builder.getInt32(1)); 3320 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3321 CGF.Builder.getInt32(0)); 3322 // Loop counter. 3323 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3324 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3325 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3326 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3327 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3328 // Generate condition for loop. 3329 BinaryOperator *Cond = BinaryOperator::Create( 3330 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary, 3331 S.getBeginLoc(), FPOptions(C.getLangOpts())); 3332 // Increment for loop counter. 3333 UnaryOperator *Inc = UnaryOperator::Create( 3334 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 3335 S.getBeginLoc(), true, FPOptions(C.getLangOpts())); 3336 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3337 // Iterate through all sections and emit a switch construct: 3338 // switch (IV) { 3339 // case 0: 3340 // <SectionStmt[0]>; 3341 // break; 3342 // ... 3343 // case <NumSection> - 1: 3344 // <SectionStmt[<NumSection> - 1]>; 3345 // break; 3346 // } 3347 // .omp.sections.exit: 3348 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3349 llvm::SwitchInst *SwitchStmt = 3350 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3351 ExitBB, CS == nullptr ? 1 : CS->size()); 3352 if (CS) { 3353 unsigned CaseNumber = 0; 3354 for (const Stmt *SubStmt : CS->children()) { 3355 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3356 CGF.EmitBlock(CaseBB); 3357 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3358 CGF.EmitStmt(SubStmt); 3359 CGF.EmitBranch(ExitBB); 3360 ++CaseNumber; 3361 } 3362 } else { 3363 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3364 CGF.EmitBlock(CaseBB); 3365 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3366 CGF.EmitStmt(CapturedStmt); 3367 CGF.EmitBranch(ExitBB); 3368 } 3369 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3370 }; 3371 3372 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3373 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3374 // Emit implicit barrier to synchronize threads and avoid data races on 3375 // initialization of firstprivate variables and post-update of lastprivate 3376 // variables. 3377 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3378 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3379 /*ForceSimpleCall=*/true); 3380 } 3381 CGF.EmitOMPPrivateClause(S, LoopScope); 3382 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3383 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3384 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3385 (void)LoopScope.Privatize(); 3386 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3387 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3388 3389 // Emit static non-chunked loop. 
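    // The sections region is lowered as a statically scheduled loop over the
    // section indices 0..<NumSections-1>; BodyGen above switches on the loop
    // counter to run the matching section. For instance, two
    // '#pragma omp section' blocks become a loop over {0, 1} with a two-case
    // switch.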
3390 OpenMPScheduleTy ScheduleKind; 3391 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3392 CGOpenMPRuntime::StaticRTInput StaticInit( 3393 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3394 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3395 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3396 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3397 // UB = min(UB, GlobalUB); 3398 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3399 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3400 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3401 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3402 // IV = LB; 3403 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3404 // while (idx <= UB) { BODY; ++idx; } 3405 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3406 [](CodeGenFunction &) {}); 3407 // Tell the runtime we are done. 3408 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3409 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3410 S.getDirectiveKind()); 3411 }; 3412 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3413 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3414 // Emit post-update of the reduction variables if IsLastIter != 0. 3415 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3416 return CGF.Builder.CreateIsNotNull( 3417 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3418 }); 3419 3420 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3421 if (HasLastprivates) 3422 CGF.EmitOMPLastprivateClauseFinal( 3423 S, /*NoFinals=*/false, 3424 CGF.Builder.CreateIsNotNull( 3425 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3426 }; 3427 3428 bool HasCancel = false; 3429 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3430 HasCancel = OSD->hasCancel(); 3431 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3432 HasCancel = OPSD->hasCancel(); 3433 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3434 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3435 HasCancel); 3436 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3437 // clause. Otherwise the barrier will be generated by the codegen for the 3438 // directive. 3439 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3440 // Emit implicit barrier to synchronize threads and avoid data races on 3441 // initialization of firstprivate variables. 3442 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3443 OMPD_unknown); 3444 } 3445 } 3446 3447 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3448 { 3449 auto LPCRegion = 3450 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3451 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3452 EmitSections(S); 3453 } 3454 // Emit an implicit barrier at the end. 3455 if (!S.getSingleClause<OMPNowaitClause>()) { 3456 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3457 OMPD_sections); 3458 } 3459 // Check for outer lastprivate conditional update. 
3460 checkForLastprivateConditionalUpdate(*this, S); 3461 } 3462 3463 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3464 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3465 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3466 }; 3467 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3468 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen, 3469 S.hasCancel()); 3470 } 3471 3472 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3473 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3474 llvm::SmallVector<const Expr *, 8> DestExprs; 3475 llvm::SmallVector<const Expr *, 8> SrcExprs; 3476 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3477 // Check if there are any 'copyprivate' clauses associated with this 3478 // 'single' construct. 3479 // Build a list of copyprivate variables along with helper expressions 3480 // (<source>, <destination>, <destination>=<source> expressions) 3481 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3482 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3483 DestExprs.append(C->destination_exprs().begin(), 3484 C->destination_exprs().end()); 3485 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3486 AssignmentOps.append(C->assignment_ops().begin(), 3487 C->assignment_ops().end()); 3488 } 3489 // Emit code for 'single' region along with 'copyprivate' clauses 3490 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3491 Action.Enter(CGF); 3492 OMPPrivateScope SingleScope(CGF); 3493 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3494 CGF.EmitOMPPrivateClause(S, SingleScope); 3495 (void)SingleScope.Privatize(); 3496 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3497 }; 3498 { 3499 auto LPCRegion = 3500 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3501 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3502 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3503 CopyprivateVars, DestExprs, 3504 SrcExprs, AssignmentOps); 3505 } 3506 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3507 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3508 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3509 CGM.getOpenMPRuntime().emitBarrierCall( 3510 *this, S.getBeginLoc(), 3511 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3512 } 3513 // Check for outer lastprivate conditional update. 
3514 checkForLastprivateConditionalUpdate(*this, S); 3515 } 3516 3517 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3518 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3519 Action.Enter(CGF); 3520 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3521 }; 3522 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3523 } 3524 3525 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3526 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3527 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3528 3529 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3530 const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt(); 3531 3532 auto FiniCB = [this](InsertPointTy IP) { 3533 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3534 }; 3535 3536 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3537 InsertPointTy CodeGenIP, 3538 llvm::BasicBlock &FiniBB) { 3539 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3540 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3541 CodeGenIP, FiniBB); 3542 }; 3543 3544 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3545 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3546 Builder.restoreIP(OMPBuilder->CreateMaster(Builder, BodyGenCB, FiniCB)); 3547 3548 return; 3549 } 3550 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3551 emitMaster(*this, S); 3552 } 3553 3554 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 3555 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3556 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3557 3558 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3559 const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt(); 3560 const Expr *Hint = nullptr; 3561 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3562 Hint = HintClause->getHint(); 3563 3564 // TODO: This is slightly different from what's currently being done in 3565 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 3566 // about typing is final. 
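  // The hint expression, if any, is evaluated and cast to a 32-bit value
  // before being handed to CreateCritical; e.g. a clause such as
  // 'hint(omp_sync_hint_contended)' (illustrative) is lowered this way.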
3567 llvm::Value *HintInst = nullptr; 3568 if (Hint) 3569 HintInst = 3570 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 3571 3572 auto FiniCB = [this](InsertPointTy IP) { 3573 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3574 }; 3575 3576 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 3577 InsertPointTy CodeGenIP, 3578 llvm::BasicBlock &FiniBB) { 3579 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3580 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 3581 CodeGenIP, FiniBB); 3582 }; 3583 3584 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3585 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3586 Builder.restoreIP(OMPBuilder->CreateCritical( 3587 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 3588 HintInst)); 3589 3590 return; 3591 } 3592 3593 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3594 Action.Enter(CGF); 3595 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3596 }; 3597 const Expr *Hint = nullptr; 3598 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3599 Hint = HintClause->getHint(); 3600 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3601 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 3602 S.getDirectiveName().getAsString(), 3603 CodeGen, S.getBeginLoc(), Hint); 3604 } 3605 3606 void CodeGenFunction::EmitOMPParallelForDirective( 3607 const OMPParallelForDirective &S) { 3608 // Emit directive as a combined directive that consists of two implicit 3609 // directives: 'parallel' with 'for' directive. 3610 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3611 Action.Enter(CGF); 3612 (void)emitWorksharingDirective(CGF, S, S.hasCancel()); 3613 }; 3614 { 3615 auto LPCRegion = 3616 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3617 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 3618 emitEmptyBoundParameters); 3619 } 3620 // Check for outer lastprivate conditional update. 3621 checkForLastprivateConditionalUpdate(*this, S); 3622 } 3623 3624 void CodeGenFunction::EmitOMPParallelForSimdDirective( 3625 const OMPParallelForSimdDirective &S) { 3626 // Emit directive as a combined directive that consists of two implicit 3627 // directives: 'parallel' with 'for' directive. 3628 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3629 Action.Enter(CGF); 3630 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 3631 emitDispatchForLoopBounds); 3632 }; 3633 { 3634 auto LPCRegion = 3635 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3636 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen, 3637 emitEmptyBoundParameters); 3638 } 3639 // Check for outer lastprivate conditional update. 3640 checkForLastprivateConditionalUpdate(*this, S); 3641 } 3642 3643 void CodeGenFunction::EmitOMPParallelMasterDirective( 3644 const OMPParallelMasterDirective &S) { 3645 // Emit directive as a combined directive that consists of two implicit 3646 // directives: 'parallel' with 'master' directive. 
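// Roughly: the outlined parallel body first handles the data-sharing clauses
// (copyin, firstprivate, private, reduction) and then applies the 'master'
// filter via emitMaster around the user code.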
3647 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3648 Action.Enter(CGF); 3649 OMPPrivateScope PrivateScope(CGF); 3650 bool Copyins = CGF.EmitOMPCopyinClause(S); 3651 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 3652 if (Copyins) { 3653 // Emit implicit barrier to synchronize threads and avoid data races on 3654 // propagation master's thread values of threadprivate variables to local 3655 // instances of that variables of all other implicit threads. 3656 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3657 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3658 /*ForceSimpleCall=*/true); 3659 } 3660 CGF.EmitOMPPrivateClause(S, PrivateScope); 3661 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 3662 (void)PrivateScope.Privatize(); 3663 emitMaster(CGF, S); 3664 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3665 }; 3666 { 3667 auto LPCRegion = 3668 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3669 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen, 3670 emitEmptyBoundParameters); 3671 emitPostUpdateForReductionClause(*this, S, 3672 [](CodeGenFunction &) { return nullptr; }); 3673 } 3674 // Check for outer lastprivate conditional update. 3675 checkForLastprivateConditionalUpdate(*this, S); 3676 } 3677 3678 void CodeGenFunction::EmitOMPParallelSectionsDirective( 3679 const OMPParallelSectionsDirective &S) { 3680 // Emit directive as a combined directive that consists of two implicit 3681 // directives: 'parallel' with 'sections' directive. 3682 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3683 Action.Enter(CGF); 3684 CGF.EmitSections(S); 3685 }; 3686 { 3687 auto LPCRegion = 3688 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3689 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen, 3690 emitEmptyBoundParameters); 3691 } 3692 // Check for outer lastprivate conditional update. 3693 checkForLastprivateConditionalUpdate(*this, S); 3694 } 3695 3696 void CodeGenFunction::EmitOMPTaskBasedDirective( 3697 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion, 3698 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen, 3699 OMPTaskDataTy &Data) { 3700 // Emit outlined function for task construct. 3701 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion); 3702 auto I = CS->getCapturedDecl()->param_begin(); 3703 auto PartId = std::next(I); 3704 auto TaskT = std::next(I, 4); 3705 // Check if the task is final 3706 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 3707 // If the condition constant folds and can be elided, try to avoid emitting 3708 // the condition and the dead arm of the if/else. 3709 const Expr *Cond = Clause->getCondition(); 3710 bool CondConstant; 3711 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 3712 Data.Final.setInt(CondConstant); 3713 else 3714 Data.Final.setPointer(EvaluateExprAsBool(Cond)); 3715 } else { 3716 // By default the task is not final. 3717 Data.Final.setInt(/*IntVal=*/false); 3718 } 3719 // Check if the task has 'priority' clause. 
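  // If present, the priority expression is evaluated and converted to a
  // 32-bit integer that is forwarded to the runtime through Data.Priority,
  // e.g. '#pragma omp task priority(p)' stores the converted value of 'p'.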
3720 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 3721 const Expr *Prio = Clause->getPriority(); 3722 Data.Priority.setInt(/*IntVal=*/true); 3723 Data.Priority.setPointer(EmitScalarConversion( 3724 EmitScalarExpr(Prio), Prio->getType(), 3725 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 3726 Prio->getExprLoc())); 3727 } 3728 // The first function argument for tasks is a thread id, the second one is a 3729 // part id (0 for tied tasks, >=0 for untied task). 3730 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 3731 // Get list of private variables. 3732 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 3733 auto IRef = C->varlist_begin(); 3734 for (const Expr *IInit : C->private_copies()) { 3735 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3736 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3737 Data.PrivateVars.push_back(*IRef); 3738 Data.PrivateCopies.push_back(IInit); 3739 } 3740 ++IRef; 3741 } 3742 } 3743 EmittedAsPrivate.clear(); 3744 // Get list of firstprivate variables. 3745 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3746 auto IRef = C->varlist_begin(); 3747 auto IElemInitRef = C->inits().begin(); 3748 for (const Expr *IInit : C->private_copies()) { 3749 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3750 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3751 Data.FirstprivateVars.push_back(*IRef); 3752 Data.FirstprivateCopies.push_back(IInit); 3753 Data.FirstprivateInits.push_back(*IElemInitRef); 3754 } 3755 ++IRef; 3756 ++IElemInitRef; 3757 } 3758 } 3759 // Get list of lastprivate variables (for taskloops). 3760 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 3761 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 3762 auto IRef = C->varlist_begin(); 3763 auto ID = C->destination_exprs().begin(); 3764 for (const Expr *IInit : C->private_copies()) { 3765 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3766 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3767 Data.LastprivateVars.push_back(*IRef); 3768 Data.LastprivateCopies.push_back(IInit); 3769 } 3770 LastprivateDstsOrigs.insert( 3771 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 3772 cast<DeclRefExpr>(*IRef)}); 3773 ++IRef; 3774 ++ID; 3775 } 3776 } 3777 SmallVector<const Expr *, 4> LHSs; 3778 SmallVector<const Expr *, 4> RHSs; 3779 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3780 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 3781 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 3782 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 3783 Data.ReductionOps.append(C->reduction_ops().begin(), 3784 C->reduction_ops().end()); 3785 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 3786 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 3787 } 3788 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 3789 *this, S.getBeginLoc(), LHSs, RHSs, Data); 3790 // Build list of dependences. 
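  // Each 'depend' clause contributes one DependData entry (dependency kind,
  // modifier, and the list of depend expressions); the runtime dependence
  // array is materialized later, when the task call itself is emitted.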
3791 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 3792 OMPTaskDataTy::DependData &DD = 3793 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 3794 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 3795 } 3796 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 3797 CapturedRegion](CodeGenFunction &CGF, 3798 PrePostActionTy &Action) { 3799 // Set proper addresses for generated private copies. 3800 OMPPrivateScope Scope(CGF); 3801 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 3802 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 3803 !Data.LastprivateVars.empty()) { 3804 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 3805 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 3806 enum { PrivatesParam = 2, CopyFnParam = 3 }; 3807 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 3808 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 3809 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 3810 CS->getCapturedDecl()->getParam(PrivatesParam))); 3811 // Map privates. 3812 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 3813 llvm::SmallVector<llvm::Value *, 16> CallArgs; 3814 CallArgs.push_back(PrivatesPtr); 3815 for (const Expr *E : Data.PrivateVars) { 3816 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3817 Address PrivatePtr = CGF.CreateMemTemp( 3818 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 3819 PrivatePtrs.emplace_back(VD, PrivatePtr); 3820 CallArgs.push_back(PrivatePtr.getPointer()); 3821 } 3822 for (const Expr *E : Data.FirstprivateVars) { 3823 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3824 Address PrivatePtr = 3825 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3826 ".firstpriv.ptr.addr"); 3827 PrivatePtrs.emplace_back(VD, PrivatePtr); 3828 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 3829 CallArgs.push_back(PrivatePtr.getPointer()); 3830 } 3831 for (const Expr *E : Data.LastprivateVars) { 3832 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3833 Address PrivatePtr = 3834 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3835 ".lastpriv.ptr.addr"); 3836 PrivatePtrs.emplace_back(VD, PrivatePtr); 3837 CallArgs.push_back(PrivatePtr.getPointer()); 3838 } 3839 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 3840 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 3841 for (const auto &Pair : LastprivateDstsOrigs) { 3842 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 3843 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 3844 /*RefersToEnclosingVariableOrCapture=*/ 3845 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 3846 Pair.second->getType(), VK_LValue, 3847 Pair.second->getExprLoc()); 3848 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 3849 return CGF.EmitLValue(&DRE).getAddress(CGF); 3850 }); 3851 } 3852 for (const auto &Pair : PrivatePtrs) { 3853 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3854 CGF.getContext().getDeclAlign(Pair.first)); 3855 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 3856 } 3857 } 3858 if (Data.Reductions) { 3859 OMPPrivateScope FirstprivateScope(CGF); 3860 for (const auto &Pair : FirstprivatePtrs) { 3861 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3862 CGF.getContext().getDeclAlign(Pair.first)); 3863 FirstprivateScope.addPrivate(Pair.first, 3864 [Replacement]() { return Replacement; 
}); 3865 } 3866 (void)FirstprivateScope.Privatize(); 3867 OMPLexicalScope LexScope(CGF, S, CapturedRegion); 3868 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars, 3869 Data.ReductionCopies, Data.ReductionOps); 3870 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad( 3871 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9))); 3872 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) { 3873 RedCG.emitSharedOrigLValue(CGF, Cnt); 3874 RedCG.emitAggregateType(CGF, Cnt); 3875 // FIXME: This must removed once the runtime library is fixed. 3876 // Emit required threadprivate variables for 3877 // initializer/combiner/finalizer. 3878 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 3879 RedCG, Cnt); 3880 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 3881 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 3882 Replacement = 3883 Address(CGF.EmitScalarConversion( 3884 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 3885 CGF.getContext().getPointerType( 3886 Data.ReductionCopies[Cnt]->getType()), 3887 Data.ReductionCopies[Cnt]->getExprLoc()), 3888 Replacement.getAlignment()); 3889 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 3890 Scope.addPrivate(RedCG.getBaseDecl(Cnt), 3891 [Replacement]() { return Replacement; }); 3892 } 3893 } 3894 // Privatize all private variables except for in_reduction items. 3895 (void)Scope.Privatize(); 3896 SmallVector<const Expr *, 4> InRedVars; 3897 SmallVector<const Expr *, 4> InRedPrivs; 3898 SmallVector<const Expr *, 4> InRedOps; 3899 SmallVector<const Expr *, 4> TaskgroupDescriptors; 3900 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) { 3901 auto IPriv = C->privates().begin(); 3902 auto IRed = C->reduction_ops().begin(); 3903 auto ITD = C->taskgroup_descriptors().begin(); 3904 for (const Expr *Ref : C->varlists()) { 3905 InRedVars.emplace_back(Ref); 3906 InRedPrivs.emplace_back(*IPriv); 3907 InRedOps.emplace_back(*IRed); 3908 TaskgroupDescriptors.emplace_back(*ITD); 3909 std::advance(IPriv, 1); 3910 std::advance(IRed, 1); 3911 std::advance(ITD, 1); 3912 } 3913 } 3914 // Privatize in_reduction items here, because taskgroup descriptors must be 3915 // privatized earlier. 3916 OMPPrivateScope InRedScope(CGF); 3917 if (!InRedVars.empty()) { 3918 ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps); 3919 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) { 3920 RedCG.emitSharedOrigLValue(CGF, Cnt); 3921 RedCG.emitAggregateType(CGF, Cnt); 3922 // The taskgroup descriptor variable is always implicit firstprivate and 3923 // privatized already during processing of the firstprivates. 3924 // FIXME: This must removed once the runtime library is fixed. 3925 // Emit required threadprivate variables for 3926 // initializer/combiner/finalizer. 
3927 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 3928 RedCG, Cnt); 3929 llvm::Value *ReductionsPtr; 3930 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) { 3931 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), 3932 TRExpr->getExprLoc()); 3933 } else { 3934 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); 3935 } 3936 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 3937 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 3938 Replacement = Address( 3939 CGF.EmitScalarConversion( 3940 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 3941 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 3942 InRedPrivs[Cnt]->getExprLoc()), 3943 Replacement.getAlignment()); 3944 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 3945 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), 3946 [Replacement]() { return Replacement; }); 3947 } 3948 } 3949 (void)InRedScope.Privatize(); 3950 3951 Action.Enter(CGF); 3952 BodyGen(CGF); 3953 }; 3954 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3955 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 3956 Data.NumberOfParts); 3957 OMPLexicalScope Scope(*this, S, llvm::None, 3958 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3959 !isOpenMPSimdDirective(S.getDirectiveKind())); 3960 TaskGen(*this, OutlinedFn, Data); 3961 } 3962 3963 static ImplicitParamDecl * 3964 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 3965 QualType Ty, CapturedDecl *CD, 3966 SourceLocation Loc) { 3967 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3968 ImplicitParamDecl::Other); 3969 auto *OrigRef = DeclRefExpr::Create( 3970 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 3971 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3972 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3973 ImplicitParamDecl::Other); 3974 auto *PrivateRef = DeclRefExpr::Create( 3975 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 3976 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3977 QualType ElemType = C.getBaseElementType(Ty); 3978 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 3979 ImplicitParamDecl::Other); 3980 auto *InitRef = DeclRefExpr::Create( 3981 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 3982 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 3983 PrivateVD->setInitStyle(VarDecl::CInit); 3984 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 3985 InitRef, /*BasePath=*/nullptr, 3986 VK_RValue)); 3987 Data.FirstprivateVars.emplace_back(OrigRef); 3988 Data.FirstprivateCopies.emplace_back(PrivateRef); 3989 Data.FirstprivateInits.emplace_back(InitRef); 3990 return OrigVD; 3991 } 3992 3993 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 3994 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 3995 OMPTargetDataInfo &InputInfo) { 3996 // Emit outlined function for task construct. 3997 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3998 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3999 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4000 auto I = CS->getCapturedDecl()->param_begin(); 4001 auto PartId = std::next(I); 4002 auto TaskT = std::next(I, 4); 4003 OMPTaskDataTy Data; 4004 // The task is not final. 
4005 Data.Final.setInt(/*IntVal=*/false); 4006 // Get list of firstprivate variables. 4007 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4008 auto IRef = C->varlist_begin(); 4009 auto IElemInitRef = C->inits().begin(); 4010 for (auto *IInit : C->private_copies()) { 4011 Data.FirstprivateVars.push_back(*IRef); 4012 Data.FirstprivateCopies.push_back(IInit); 4013 Data.FirstprivateInits.push_back(*IElemInitRef); 4014 ++IRef; 4015 ++IElemInitRef; 4016 } 4017 } 4018 OMPPrivateScope TargetScope(*this); 4019 VarDecl *BPVD = nullptr; 4020 VarDecl *PVD = nullptr; 4021 VarDecl *SVD = nullptr; 4022 if (InputInfo.NumberOfTargetItems > 0) { 4023 auto *CD = CapturedDecl::Create( 4024 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4025 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4026 QualType BaseAndPointersType = getContext().getConstantArrayType( 4027 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4028 /*IndexTypeQuals=*/0); 4029 BPVD = createImplicitFirstprivateForType( 4030 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 4031 PVD = createImplicitFirstprivateForType( 4032 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 4033 QualType SizesType = getContext().getConstantArrayType( 4034 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4035 ArrSize, nullptr, ArrayType::Normal, 4036 /*IndexTypeQuals=*/0); 4037 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4038 S.getBeginLoc()); 4039 TargetScope.addPrivate( 4040 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 4041 TargetScope.addPrivate(PVD, 4042 [&InputInfo]() { return InputInfo.PointersArray; }); 4043 TargetScope.addPrivate(SVD, 4044 [&InputInfo]() { return InputInfo.SizesArray; }); 4045 } 4046 (void)TargetScope.Privatize(); 4047 // Build list of dependences. 4048 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4049 OMPTaskDataTy::DependData &DD = 4050 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4051 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4052 } 4053 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, 4054 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4055 // Set proper addresses for generated private copies. 4056 OMPPrivateScope Scope(CGF); 4057 if (!Data.FirstprivateVars.empty()) { 4058 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 4059 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 4060 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4061 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4062 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4063 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4064 CS->getCapturedDecl()->getParam(PrivatesParam))); 4065 // Map privates. 
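      // The copy function generated for the task fills in the addresses of
      // the privatized copies: each '.firstpriv.ptr.addr' temporary receives
      // the corresponding pointer and is loaded below to re-map the VarDecl
      // to its task-local storage.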
4066 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4067 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4068 CallArgs.push_back(PrivatesPtr); 4069 for (const Expr *E : Data.FirstprivateVars) { 4070 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4071 Address PrivatePtr = 4072 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4073 ".firstpriv.ptr.addr"); 4074 PrivatePtrs.emplace_back(VD, PrivatePtr); 4075 CallArgs.push_back(PrivatePtr.getPointer()); 4076 } 4077 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4078 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4079 for (const auto &Pair : PrivatePtrs) { 4080 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4081 CGF.getContext().getDeclAlign(Pair.first)); 4082 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4083 } 4084 } 4085 // Privatize all private variables except for in_reduction items. 4086 (void)Scope.Privatize(); 4087 if (InputInfo.NumberOfTargetItems > 0) { 4088 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4089 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4090 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4091 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4092 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4093 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4094 } 4095 4096 Action.Enter(CGF); 4097 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4098 BodyGen(CGF); 4099 }; 4100 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4101 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4102 Data.NumberOfParts); 4103 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4104 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4105 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4106 SourceLocation()); 4107 4108 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4109 SharedsTy, CapturedStruct, &IfCond, Data); 4110 } 4111 4112 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4113 // Emit outlined function for task construct. 4114 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4115 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4116 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4117 const Expr *IfCond = nullptr; 4118 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4119 if (C->getNameModifier() == OMPD_unknown || 4120 C->getNameModifier() == OMPD_task) { 4121 IfCond = C->getCondition(); 4122 break; 4123 } 4124 } 4125 4126 OMPTaskDataTy Data; 4127 // Check if we should emit tied or untied task. 
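  // Tasks are tied by default; '#pragma omp task untied' clears Data.Tied.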
4128 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4129 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4130 CGF.EmitStmt(CS->getCapturedStmt()); 4131 }; 4132 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4133 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4134 const OMPTaskDataTy &Data) { 4135 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4136 SharedsTy, CapturedStruct, IfCond, 4137 Data); 4138 }; 4139 auto LPCRegion = 4140 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4141 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4142 } 4143 4144 void CodeGenFunction::EmitOMPTaskyieldDirective( 4145 const OMPTaskyieldDirective &S) { 4146 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4147 } 4148 4149 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4150 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4151 } 4152 4153 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4154 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 4155 } 4156 4157 void CodeGenFunction::EmitOMPTaskgroupDirective( 4158 const OMPTaskgroupDirective &S) { 4159 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4160 Action.Enter(CGF); 4161 if (const Expr *E = S.getReductionRef()) { 4162 SmallVector<const Expr *, 4> LHSs; 4163 SmallVector<const Expr *, 4> RHSs; 4164 OMPTaskDataTy Data; 4165 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 4166 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4167 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4168 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4169 Data.ReductionOps.append(C->reduction_ops().begin(), 4170 C->reduction_ops().end()); 4171 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4172 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4173 } 4174 llvm::Value *ReductionDesc = 4175 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 4176 LHSs, RHSs, Data); 4177 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4178 CGF.EmitVarDecl(*VD); 4179 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 4180 /*Volatile=*/false, E->getType()); 4181 } 4182 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4183 }; 4184 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4185 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 4186 } 4187 4188 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 4189 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 4190 ? 
llvm::AtomicOrdering::NotAtomic 4191 : llvm::AtomicOrdering::AcquireRelease; 4192 CGM.getOpenMPRuntime().emitFlush( 4193 *this, 4194 [&S]() -> ArrayRef<const Expr *> { 4195 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 4196 return llvm::makeArrayRef(FlushClause->varlist_begin(), 4197 FlushClause->varlist_end()); 4198 return llvm::None; 4199 }(), 4200 S.getBeginLoc(), AO); 4201 } 4202 4203 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 4204 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 4205 LValue DOLVal = EmitLValue(DO->getDepobj()); 4206 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 4207 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 4208 DC->getModifier()); 4209 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 4210 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 4211 *this, Dependencies, DC->getBeginLoc()); 4212 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 4213 return; 4214 } 4215 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 4216 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 4217 return; 4218 } 4219 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 4220 CGM.getOpenMPRuntime().emitUpdateClause( 4221 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 4222 return; 4223 } 4224 } 4225 4226 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 4227 if (!OMPParentLoopDirectiveForScan) 4228 return; 4229 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 4230 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 4231 SmallVector<const Expr *, 4> Shareds; 4232 SmallVector<const Expr *, 4> Privates; 4233 SmallVector<const Expr *, 4> LHSs; 4234 SmallVector<const Expr *, 4> RHSs; 4235 SmallVector<const Expr *, 4> ReductionOps; 4236 SmallVector<const Expr *, 4> CopyOps; 4237 SmallVector<const Expr *, 4> CopyArrayTemps; 4238 SmallVector<const Expr *, 4> CopyArrayElems; 4239 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 4240 if (C->getModifier() != OMPC_REDUCTION_inscan) 4241 continue; 4242 Shareds.append(C->varlist_begin(), C->varlist_end()); 4243 Privates.append(C->privates().begin(), C->privates().end()); 4244 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4245 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4246 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 4247 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 4248 CopyArrayTemps.append(C->copy_array_temps().begin(), 4249 C->copy_array_temps().end()); 4250 CopyArrayElems.append(C->copy_array_elems().begin(), 4251 C->copy_array_elems().end()); 4252 } 4253 if (ParentDir.getDirectiveKind() == OMPD_simd || 4254 (getLangOpts().OpenMPSimd && 4255 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 4256 // For simd directive and simd-based directives in simd only mode, use the 4257 // following codegen: 4258 // int x = 0; 4259 // #pragma omp simd reduction(inscan, +: x) 4260 // for (..) { 4261 // <first part> 4262 // #pragma omp scan inclusive(x) 4263 // <second part> 4264 // } 4265 // is transformed to: 4266 // int x = 0; 4267 // for (..) { 4268 // int x_priv = 0; 4269 // <first part> 4270 // x = x_priv + x; 4271 // x_priv = x; 4272 // <second part> 4273 // } 4274 // and 4275 // int x = 0; 4276 // #pragma omp simd reduction(inscan, +: x) 4277 // for (..) 
{ 4278 // <first part> 4279 // #pragma omp scan exclusive(x) 4280 // <second part> 4281 // } 4282 // to 4283 // int x = 0; 4284 // for (..) { 4285 // int x_priv = 0; 4286 // <second part> 4287 // int temp = x; 4288 // x = x_priv + x; 4289 // x_priv = temp; 4290 // <first part> 4291 // } 4292 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 4293 EmitBranch(IsInclusive 4294 ? OMPScanReduce 4295 : BreakContinueStack.back().ContinueBlock.getBlock()); 4296 EmitBlock(OMPScanDispatch); 4297 { 4298 // New scope for correct construction/destruction of temp variables for 4299 // exclusive scan. 4300 LexicalScope Scope(*this, S.getSourceRange()); 4301 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 4302 EmitBlock(OMPScanReduce); 4303 if (!IsInclusive) { 4304 // Create temp var and copy LHS value to this temp value. 4305 // TMP = LHS; 4306 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4307 const Expr *PrivateExpr = Privates[I]; 4308 const Expr *TempExpr = CopyArrayTemps[I]; 4309 EmitAutoVarDecl( 4310 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 4311 LValue DestLVal = EmitLValue(TempExpr); 4312 LValue SrcLVal = EmitLValue(LHSs[I]); 4313 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4314 SrcLVal.getAddress(*this), 4315 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4316 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4317 CopyOps[I]); 4318 } 4319 } 4320 CGM.getOpenMPRuntime().emitReduction( 4321 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 4322 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 4323 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4324 const Expr *PrivateExpr = Privates[I]; 4325 LValue DestLVal; 4326 LValue SrcLVal; 4327 if (IsInclusive) { 4328 DestLVal = EmitLValue(RHSs[I]); 4329 SrcLVal = EmitLValue(LHSs[I]); 4330 } else { 4331 const Expr *TempExpr = CopyArrayTemps[I]; 4332 DestLVal = EmitLValue(RHSs[I]); 4333 SrcLVal = EmitLValue(TempExpr); 4334 } 4335 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4336 SrcLVal.getAddress(*this), 4337 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4338 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4339 CopyOps[I]); 4340 } 4341 } 4342 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 4343 OMPScanExitBlock = IsInclusive 4344 ? BreakContinueStack.back().ContinueBlock.getBlock() 4345 : OMPScanReduce; 4346 EmitBlock(OMPAfterScanBlock); 4347 return; 4348 } 4349 if (!IsInclusive) { 4350 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4351 EmitBlock(OMPScanExitBlock); 4352 } 4353 if (OMPFirstScanLoop) { 4354 // Emit buffer[i] = red; at the end of the input phase. 
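// In the worksharing (non-simd) inscan scheme the loop body is emitted twice:
// during the first (input) pass each iteration stores its private reduction
// value into the temporary buffer element buffer[i] here, and the second
// (scan) pass reads the prefix value back at its entry (see the
// !OMPFirstScanLoop case below).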
4355 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4356 .getIterationVariable() 4357 ->IgnoreParenImpCasts(); 4358 LValue IdxLVal = EmitLValue(IVExpr); 4359 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4360 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4361 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4362 const Expr *PrivateExpr = Privates[I]; 4363 const Expr *OrigExpr = Shareds[I]; 4364 const Expr *CopyArrayElem = CopyArrayElems[I]; 4365 OpaqueValueMapping IdxMapping( 4366 *this, 4367 cast<OpaqueValueExpr>( 4368 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4369 RValue::get(IdxVal)); 4370 LValue DestLVal = EmitLValue(CopyArrayElem); 4371 LValue SrcLVal = EmitLValue(OrigExpr); 4372 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4373 SrcLVal.getAddress(*this), 4374 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4375 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4376 CopyOps[I]); 4377 } 4378 } 4379 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4380 if (IsInclusive) { 4381 EmitBlock(OMPScanExitBlock); 4382 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4383 } 4384 EmitBlock(OMPScanDispatch); 4385 if (!OMPFirstScanLoop) { 4386 // Emit red = buffer[i]; at the entrance to the scan phase. 4387 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4388 .getIterationVariable() 4389 ->IgnoreParenImpCasts(); 4390 LValue IdxLVal = EmitLValue(IVExpr); 4391 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4392 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4393 llvm::BasicBlock *ExclusiveExitBB = nullptr; 4394 if (!IsInclusive) { 4395 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 4396 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 4397 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 4398 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 4399 EmitBlock(ContBB); 4400 // Use idx - 1 iteration for exclusive scan. 4401 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 4402 } 4403 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4404 const Expr *PrivateExpr = Privates[I]; 4405 const Expr *OrigExpr = Shareds[I]; 4406 const Expr *CopyArrayElem = CopyArrayElems[I]; 4407 OpaqueValueMapping IdxMapping( 4408 *this, 4409 cast<OpaqueValueExpr>( 4410 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4411 RValue::get(IdxVal)); 4412 LValue SrcLVal = EmitLValue(CopyArrayElem); 4413 LValue DestLVal = EmitLValue(OrigExpr); 4414 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4415 SrcLVal.getAddress(*this), 4416 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4417 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4418 CopyOps[I]); 4419 } 4420 if (!IsInclusive) { 4421 EmitBlock(ExclusiveExitBB); 4422 } 4423 } 4424 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 4425 : OMPAfterScanBlock); 4426 EmitBlock(OMPAfterScanBlock); 4427 } 4428 4429 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 4430 const CodeGenLoopTy &CodeGenLoop, 4431 Expr *IncExpr) { 4432 // Emit the loop iteration variable. 4433 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 4434 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 4435 EmitVarDecl(*IVDecl); 4436 4437 // Emit the iterations count variable. 
4438 // If it is not a variable, Sema decided to calculate iterations count on each 4439 // iteration (e.g., it is foldable into a constant). 4440 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 4441 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 4442 // Emit calculation of the iterations count. 4443 EmitIgnoredExpr(S.getCalcLastIteration()); 4444 } 4445 4446 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 4447 4448 bool HasLastprivateClause = false; 4449 // Check pre-condition. 4450 { 4451 OMPLoopScope PreInitScope(*this, S); 4452 // Skip the entire loop if we don't meet the precondition. 4453 // If the condition constant folds and can be elided, avoid emitting the 4454 // whole loop. 4455 bool CondConstant; 4456 llvm::BasicBlock *ContBlock = nullptr; 4457 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 4458 if (!CondConstant) 4459 return; 4460 } else { 4461 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 4462 ContBlock = createBasicBlock("omp.precond.end"); 4463 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 4464 getProfileCount(&S)); 4465 EmitBlock(ThenBlock); 4466 incrementProfileCounter(&S); 4467 } 4468 4469 emitAlignedClause(*this, S); 4470 // Emit 'then' code. 4471 { 4472 // Emit helper vars inits. 4473 4474 LValue LB = EmitOMPHelperVar( 4475 *this, cast<DeclRefExpr>( 4476 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4477 ? S.getCombinedLowerBoundVariable() 4478 : S.getLowerBoundVariable()))); 4479 LValue UB = EmitOMPHelperVar( 4480 *this, cast<DeclRefExpr>( 4481 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4482 ? S.getCombinedUpperBoundVariable() 4483 : S.getUpperBoundVariable()))); 4484 LValue ST = 4485 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 4486 LValue IL = 4487 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 4488 4489 OMPPrivateScope LoopScope(*this); 4490 if (EmitOMPFirstprivateClause(S, LoopScope)) { 4491 // Emit implicit barrier to synchronize threads and avoid data races 4492 // on initialization of firstprivate variables and post-update of 4493 // lastprivate variables. 4494 CGM.getOpenMPRuntime().emitBarrierCall( 4495 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 4496 /*ForceSimpleCall=*/true); 4497 } 4498 EmitOMPPrivateClause(S, LoopScope); 4499 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 4500 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4501 !isOpenMPTeamsDirective(S.getDirectiveKind())) 4502 EmitOMPReductionClauseInit(S, LoopScope); 4503 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 4504 EmitOMPPrivateLoopCounters(S, LoopScope); 4505 (void)LoopScope.Privatize(); 4506 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4507 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 4508 4509 // Detect the distribute schedule kind and chunk. 4510 llvm::Value *Chunk = nullptr; 4511 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 4512 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 4513 ScheduleKind = C->getDistScheduleKind(); 4514 if (const Expr *Ch = C->getChunkSize()) { 4515 Chunk = EmitScalarExpr(Ch); 4516 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 4517 S.getIterationVariable()->getType(), 4518 S.getBeginLoc()); 4519 } 4520 } else { 4521 // Default behaviour for dist_schedule clause. 
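// For example, '#pragma omp distribute dist_schedule(static, 128)' takes the
// branch above (ScheduleKind = OMPC_DIST_SCHEDULE_static, Chunk = 128); with
// no dist_schedule clause the target runtime supplies the default schedule
// and chunk value here.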
4522 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 4523 *this, S, ScheduleKind, Chunk); 4524 } 4525 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 4526 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 4527 4528 // OpenMP [2.10.8, distribute Construct, Description] 4529 // If dist_schedule is specified, kind must be static. If specified, 4530 // iterations are divided into chunks of size chunk_size, chunks are 4531 // assigned to the teams of the league in a round-robin fashion in the 4532 // order of the team number. When no chunk_size is specified, the 4533 // iteration space is divided into chunks that are approximately equal 4534 // in size, and at most one chunk is distributed to each team of the 4535 // league. The size of the chunks is unspecified in this case. 4536 bool StaticChunked = RT.isStaticChunked( 4537 ScheduleKind, /* Chunked */ Chunk != nullptr) && 4538 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 4539 if (RT.isStaticNonchunked(ScheduleKind, 4540 /* Chunked */ Chunk != nullptr) || 4541 StaticChunked) { 4542 CGOpenMPRuntime::StaticRTInput StaticInit( 4543 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 4544 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 4545 StaticChunked ? Chunk : nullptr); 4546 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 4547 StaticInit); 4548 JumpDest LoopExit = 4549 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 4550 // UB = min(UB, GlobalUB); 4551 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4552 ? S.getCombinedEnsureUpperBound() 4553 : S.getEnsureUpperBound()); 4554 // IV = LB; 4555 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4556 ? S.getCombinedInit() 4557 : S.getInit()); 4558 4559 const Expr *Cond = 4560 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4561 ? S.getCombinedCond() 4562 : S.getCond(); 4563 4564 if (StaticChunked) 4565 Cond = S.getCombinedDistCond(); 4566 4567 // For static unchunked schedules generate: 4568 // 4569 // 1. For distribute alone, codegen 4570 // while (idx <= UB) { 4571 // BODY; 4572 // ++idx; 4573 // } 4574 // 4575 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 4576 // while (idx <= UB) { 4577 // <CodeGen rest of pragma>(LB, UB); 4578 // idx += ST; 4579 // } 4580 // 4581 // For static chunk one schedule generate: 4582 // 4583 // while (IV <= GlobalUB) { 4584 // <CodeGen rest of pragma>(LB, UB); 4585 // LB += ST; 4586 // UB += ST; 4587 // UB = min(UB, GlobalUB); 4588 // IV = LB; 4589 // } 4590 // 4591 emitCommonSimdLoop( 4592 *this, S, 4593 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4594 if (isOpenMPSimdDirective(S.getDirectiveKind())) 4595 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 4596 }, 4597 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 4598 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 4599 CGF.EmitOMPInnerLoop( 4600 S, LoopScope.requiresCleanups(), Cond, IncExpr, 4601 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 4602 CodeGenLoop(CGF, S, LoopExit); 4603 }, 4604 [&S, StaticChunked](CodeGenFunction &CGF) { 4605 if (StaticChunked) { 4606 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 4607 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 4608 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 4609 CGF.EmitIgnoredExpr(S.getCombinedInit()); 4610 } 4611 }); 4612 }); 4613 EmitBlock(LoopExit.getBlock()); 4614 // Tell the runtime we are done. 
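// On the host this typically lowers to a '__kmpc_for_static_fini' call that
// pairs with the static-init runtime call emitted above.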
4615 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 4616 } else { 4617 // Emit the outer loop, which requests its work chunk [LB..UB] from 4618 // runtime and runs the inner loop to process it. 4619 const OMPLoopArguments LoopArguments = { 4620 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 4621 IL.getAddress(*this), Chunk}; 4622 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 4623 CodeGenLoop); 4624 } 4625 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 4626 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 4627 return CGF.Builder.CreateIsNotNull( 4628 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 4629 }); 4630 } 4631 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 4632 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4633 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 4634 EmitOMPReductionClauseFinal(S, OMPD_simd); 4635 // Emit post-update of the reduction variables if IsLastIter != 0. 4636 emitPostUpdateForReductionClause( 4637 *this, S, [IL, &S](CodeGenFunction &CGF) { 4638 return CGF.Builder.CreateIsNotNull( 4639 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 4640 }); 4641 } 4642 // Emit final copy of the lastprivate variables if IsLastIter != 0. 4643 if (HasLastprivateClause) { 4644 EmitOMPLastprivateClauseFinal( 4645 S, /*NoFinals=*/false, 4646 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 4647 } 4648 } 4649 4650 // We're now done with the loop, so jump to the continuation block. 4651 if (ContBlock) { 4652 EmitBranch(ContBlock); 4653 EmitBlock(ContBlock, true); 4654 } 4655 } 4656 } 4657 4658 void CodeGenFunction::EmitOMPDistributeDirective( 4659 const OMPDistributeDirective &S) { 4660 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4661 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4662 }; 4663 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4664 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 4665 } 4666 4667 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 4668 const CapturedStmt *S, 4669 SourceLocation Loc) { 4670 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 4671 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 4672 CGF.CapturedStmtInfo = &CapStmtInfo; 4673 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 4674 Fn->setDoesNotRecurse(); 4675 return Fn; 4676 } 4677 4678 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 4679 if (S.hasClausesOfKind<OMPDependClause>()) { 4680 assert(!S.getAssociatedStmt() && 4681 "No associated statement must be in ordered depend construct."); 4682 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 4683 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 4684 return; 4685 } 4686 const auto *C = S.getSingleClause<OMPSIMDClause>(); 4687 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 4688 PrePostActionTy &Action) { 4689 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 4690 if (C) { 4691 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4692 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4693 llvm::Function *OutlinedFn = 4694 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 4695 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 4696 OutlinedFn, CapturedVars); 4697 } else { 4698 Action.Enter(CGF); 4699 CGF.EmitStmt(CS->getCapturedStmt()); 4700 } 4701 }; 4702 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4703 CGM.getOpenMPRuntime().emitOrderedRegion(*this, 
CodeGen, S.getBeginLoc(), !C); 4704 } 4705 4706 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 4707 QualType SrcType, QualType DestType, 4708 SourceLocation Loc) { 4709 assert(CGF.hasScalarEvaluationKind(DestType) && 4710 "DestType must have scalar evaluation kind."); 4711 assert(!Val.isAggregate() && "Must be a scalar or complex."); 4712 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 4713 DestType, Loc) 4714 : CGF.EmitComplexToScalarConversion( 4715 Val.getComplexVal(), SrcType, DestType, Loc); 4716 } 4717 4718 static CodeGenFunction::ComplexPairTy 4719 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 4720 QualType DestType, SourceLocation Loc) { 4721 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 4722 "DestType must have complex evaluation kind."); 4723 CodeGenFunction::ComplexPairTy ComplexVal; 4724 if (Val.isScalar()) { 4725 // Convert the input element to the element type of the complex. 4726 QualType DestElementType = 4727 DestType->castAs<ComplexType>()->getElementType(); 4728 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 4729 Val.getScalarVal(), SrcType, DestElementType, Loc); 4730 ComplexVal = CodeGenFunction::ComplexPairTy( 4731 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 4732 } else { 4733 assert(Val.isComplex() && "Must be a scalar or complex."); 4734 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 4735 QualType DestElementType = 4736 DestType->castAs<ComplexType>()->getElementType(); 4737 ComplexVal.first = CGF.EmitScalarConversion( 4738 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 4739 ComplexVal.second = CGF.EmitScalarConversion( 4740 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 4741 } 4742 return ComplexVal; 4743 } 4744 4745 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4746 LValue LVal, RValue RVal) { 4747 if (LVal.isGlobalReg()) 4748 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 4749 else 4750 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 4751 } 4752 4753 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 4754 llvm::AtomicOrdering AO, LValue LVal, 4755 SourceLocation Loc) { 4756 if (LVal.isGlobalReg()) 4757 return CGF.EmitLoadOfLValue(LVal, Loc); 4758 return CGF.EmitAtomicLoad( 4759 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 4760 LVal.isVolatile()); 4761 } 4762 4763 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 4764 QualType RValTy, SourceLocation Loc) { 4765 switch (getEvaluationKind(LVal.getType())) { 4766 case TEK_Scalar: 4767 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 4768 *this, RVal, RValTy, LVal.getType(), Loc)), 4769 LVal); 4770 break; 4771 case TEK_Complex: 4772 EmitStoreOfComplex( 4773 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 4774 /*isInit=*/false); 4775 break; 4776 case TEK_Aggregate: 4777 llvm_unreachable("Must be a scalar or complex."); 4778 } 4779 } 4780 4781 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4782 const Expr *X, const Expr *V, 4783 SourceLocation Loc) { 4784 // v = x; 4785 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 4786 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 4787 LValue XLValue = CGF.EmitLValue(X); 4788 LValue VLValue = CGF.EmitLValue(V); 4789 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 4790 // OpenMP, 
2.17.7, atomic Construct 4791 // If the read or capture clause is specified and the acquire, acq_rel, or 4792 // seq_cst clause is specified then the strong flush on exit from the atomic 4793 // operation is also an acquire flush. 4794 switch (AO) { 4795 case llvm::AtomicOrdering::Acquire: 4796 case llvm::AtomicOrdering::AcquireRelease: 4797 case llvm::AtomicOrdering::SequentiallyConsistent: 4798 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4799 llvm::AtomicOrdering::Acquire); 4800 break; 4801 case llvm::AtomicOrdering::Monotonic: 4802 case llvm::AtomicOrdering::Release: 4803 break; 4804 case llvm::AtomicOrdering::NotAtomic: 4805 case llvm::AtomicOrdering::Unordered: 4806 llvm_unreachable("Unexpected ordering."); 4807 } 4808 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 4809 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4810 } 4811 4812 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, 4813 llvm::AtomicOrdering AO, const Expr *X, 4814 const Expr *E, SourceLocation Loc) { 4815 // x = expr; 4816 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 4817 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 4818 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4819 // OpenMP, 2.17.7, atomic Construct 4820 // If the write, update, or capture clause is specified and the release, 4821 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4822 // the atomic operation is also a release flush. 4823 switch (AO) { 4824 case llvm::AtomicOrdering::Release: 4825 case llvm::AtomicOrdering::AcquireRelease: 4826 case llvm::AtomicOrdering::SequentiallyConsistent: 4827 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4828 llvm::AtomicOrdering::Release); 4829 break; 4830 case llvm::AtomicOrdering::Acquire: 4831 case llvm::AtomicOrdering::Monotonic: 4832 break; 4833 case llvm::AtomicOrdering::NotAtomic: 4834 case llvm::AtomicOrdering::Unordered: 4835 llvm_unreachable("Unexpected ordering."); 4836 } 4837 } 4838 4839 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 4840 RValue Update, 4841 BinaryOperatorKind BO, 4842 llvm::AtomicOrdering AO, 4843 bool IsXLHSInRHSPart) { 4844 ASTContext &Context = CGF.getContext(); 4845 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 4846 // expression is simple and atomic is allowed for the given type for the 4847 // target platform. 
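// For example, '#pragma omp atomic' applied to 'x += 1;' with an 'int x'
// passes these checks and is lowered to a single 'atomicrmw add' below; a
// floating-point update or a bitfield lvalue fails them and falls back to the
// compare-and-swap path in EmitOMPAtomicSimpleUpdateExpr.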
4848 if (BO == BO_Comma || !Update.isScalar() || 4849 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() || 4850 (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 4851 (Update.getScalarVal()->getType() != 4852 X.getAddress(CGF).getElementType())) || 4853 !X.getAddress(CGF).getElementType()->isIntegerTy() || 4854 !Context.getTargetInfo().hasBuiltinAtomic( 4855 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 4856 return std::make_pair(false, RValue::get(nullptr)); 4857 4858 llvm::AtomicRMWInst::BinOp RMWOp; 4859 switch (BO) { 4860 case BO_Add: 4861 RMWOp = llvm::AtomicRMWInst::Add; 4862 break; 4863 case BO_Sub: 4864 if (!IsXLHSInRHSPart) 4865 return std::make_pair(false, RValue::get(nullptr)); 4866 RMWOp = llvm::AtomicRMWInst::Sub; 4867 break; 4868 case BO_And: 4869 RMWOp = llvm::AtomicRMWInst::And; 4870 break; 4871 case BO_Or: 4872 RMWOp = llvm::AtomicRMWInst::Or; 4873 break; 4874 case BO_Xor: 4875 RMWOp = llvm::AtomicRMWInst::Xor; 4876 break; 4877 case BO_LT: 4878 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4879 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 4880 : llvm::AtomicRMWInst::Max) 4881 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 4882 : llvm::AtomicRMWInst::UMax); 4883 break; 4884 case BO_GT: 4885 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4886 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 4887 : llvm::AtomicRMWInst::Min) 4888 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax 4889 : llvm::AtomicRMWInst::UMin); 4890 break; 4891 case BO_Assign: 4892 RMWOp = llvm::AtomicRMWInst::Xchg; 4893 break; 4894 case BO_Mul: 4895 case BO_Div: 4896 case BO_Rem: 4897 case BO_Shl: 4898 case BO_Shr: 4899 case BO_LAnd: 4900 case BO_LOr: 4901 return std::make_pair(false, RValue::get(nullptr)); 4902 case BO_PtrMemD: 4903 case BO_PtrMemI: 4904 case BO_LE: 4905 case BO_GE: 4906 case BO_EQ: 4907 case BO_NE: 4908 case BO_Cmp: 4909 case BO_AddAssign: 4910 case BO_SubAssign: 4911 case BO_AndAssign: 4912 case BO_OrAssign: 4913 case BO_XorAssign: 4914 case BO_MulAssign: 4915 case BO_DivAssign: 4916 case BO_RemAssign: 4917 case BO_ShlAssign: 4918 case BO_ShrAssign: 4919 case BO_Comma: 4920 llvm_unreachable("Unsupported atomic update operation"); 4921 } 4922 llvm::Value *UpdateVal = Update.getScalarVal(); 4923 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 4924 UpdateVal = CGF.Builder.CreateIntCast( 4925 IC, X.getAddress(CGF).getElementType(), 4926 X.getType()->hasSignedIntegerRepresentation()); 4927 } 4928 llvm::Value *Res = 4929 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 4930 return std::make_pair(true, RValue::get(Res)); 4931 } 4932 4933 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 4934 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 4935 llvm::AtomicOrdering AO, SourceLocation Loc, 4936 const llvm::function_ref<RValue(RValue)> CommonGen) { 4937 // Update expressions are allowed to have the following forms: 4938 // x binop= expr; -> xrval + expr; 4939 // x++, ++x -> xrval + 1; 4940 // x--, --x -> xrval - 1; 4941 // x = x binop expr; -> xrval binop expr 4942 // x = expr Op x; - > expr binop xrval; 4943 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 4944 if (!Res.first) { 4945 if (X.isGlobalReg()) { 4946 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 4947 // 'xrval'. 4948 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 4949 } else { 4950 // Perform compare-and-swap procedure. 
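// EmitAtomicUpdate loads the old value of 'x', applies CommonGen to it, and
// retries with a cmpxchg (or the equivalent atomic libcall) until the store
// succeeds.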
4951 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 4952 } 4953 } 4954 return Res; 4955 } 4956 4957 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, 4958 llvm::AtomicOrdering AO, const Expr *X, 4959 const Expr *E, const Expr *UE, 4960 bool IsXLHSInRHSPart, SourceLocation Loc) { 4961 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4962 "Update expr in 'atomic update' must be a binary operator."); 4963 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4964 // Update expressions are allowed to have the following forms: 4965 // x binop= expr; -> xrval + expr; 4966 // x++, ++x -> xrval + 1; 4967 // x--, --x -> xrval - 1; 4968 // x = x binop expr; -> xrval binop expr 4969 // x = expr Op x; - > expr binop xrval; 4970 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 4971 LValue XLValue = CGF.EmitLValue(X); 4972 RValue ExprRValue = CGF.EmitAnyExpr(E); 4973 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4974 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4975 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4976 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 4977 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 4978 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4979 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4980 return CGF.EmitAnyExpr(UE); 4981 }; 4982 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 4983 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4984 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4985 // OpenMP, 2.17.7, atomic Construct 4986 // If the write, update, or capture clause is specified and the release, 4987 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4988 // the atomic operation is also a release flush. 
4989 switch (AO) { 4990 case llvm::AtomicOrdering::Release: 4991 case llvm::AtomicOrdering::AcquireRelease: 4992 case llvm::AtomicOrdering::SequentiallyConsistent: 4993 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4994 llvm::AtomicOrdering::Release); 4995 break; 4996 case llvm::AtomicOrdering::Acquire: 4997 case llvm::AtomicOrdering::Monotonic: 4998 break; 4999 case llvm::AtomicOrdering::NotAtomic: 5000 case llvm::AtomicOrdering::Unordered: 5001 llvm_unreachable("Unexpected ordering."); 5002 } 5003 } 5004 5005 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 5006 QualType SourceType, QualType ResType, 5007 SourceLocation Loc) { 5008 switch (CGF.getEvaluationKind(ResType)) { 5009 case TEK_Scalar: 5010 return RValue::get( 5011 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 5012 case TEK_Complex: { 5013 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 5014 return RValue::getComplex(Res.first, Res.second); 5015 } 5016 case TEK_Aggregate: 5017 break; 5018 } 5019 llvm_unreachable("Must be a scalar or complex."); 5020 } 5021 5022 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, 5023 llvm::AtomicOrdering AO, 5024 bool IsPostfixUpdate, const Expr *V, 5025 const Expr *X, const Expr *E, 5026 const Expr *UE, bool IsXLHSInRHSPart, 5027 SourceLocation Loc) { 5028 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 5029 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 5030 RValue NewVVal; 5031 LValue VLValue = CGF.EmitLValue(V); 5032 LValue XLValue = CGF.EmitLValue(X); 5033 RValue ExprRValue = CGF.EmitAnyExpr(E); 5034 QualType NewVValType; 5035 if (UE) { 5036 // 'x' is updated with some additional value. 5037 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5038 "Update expr in 'atomic capture' must be a binary operator."); 5039 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5040 // Update expressions are allowed to have the following forms: 5041 // x binop= expr; -> xrval + expr; 5042 // x++, ++x -> xrval + 1; 5043 // x--, --x -> xrval - 1; 5044 // x = x binop expr; -> xrval binop expr 5045 // x = expr Op x; - > expr binop xrval; 5046 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5047 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5048 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5049 NewVValType = XRValExpr->getType(); 5050 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5051 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 5052 IsPostfixUpdate](RValue XRValue) { 5053 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5054 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5055 RValue Res = CGF.EmitAnyExpr(UE); 5056 NewVVal = IsPostfixUpdate ? XRValue : Res; 5057 return Res; 5058 }; 5059 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5060 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5061 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5062 if (Res.first) { 5063 // 'atomicrmw' instruction was generated. 5064 if (IsPostfixUpdate) { 5065 // Use old value from 'atomicrmw'. 5066 NewVVal = Res.second; 5067 } else { 5068 // 'atomicrmw' does not provide new value, so evaluate it using old 5069 // value of 'x'. 
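// For example, for 'v = ++x;' the atomicrmw returns the old value of 'x'; the
// update expression is re-evaluated on that old value so that 'v' receives
// the incremented result.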
5070 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5071 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5072 NewVVal = CGF.EmitAnyExpr(UE); 5073 } 5074 } 5075 } else { 5076 // 'x' is simply rewritten with some 'expr'. 5077 NewVValType = X->getType().getNonReferenceType(); 5078 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5079 X->getType().getNonReferenceType(), Loc); 5080 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5081 NewVVal = XRValue; 5082 return ExprRValue; 5083 }; 5084 // Try to perform atomicrmw xchg, otherwise simple exchange. 5085 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5086 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5087 Loc, Gen); 5088 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5089 if (Res.first) { 5090 // 'atomicrmw' instruction was generated. 5091 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 5092 } 5093 } 5094 // Emit post-update store to 'v' of old/new 'x' value. 5095 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 5096 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5097 // OpenMP, 2.17.7, atomic Construct 5098 // If the write, update, or capture clause is specified and the release, 5099 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5100 // the atomic operation is also a release flush. 5101 // If the read or capture clause is specified and the acquire, acq_rel, or 5102 // seq_cst clause is specified then the strong flush on exit from the atomic 5103 // operation is also an acquire flush. 5104 switch (AO) { 5105 case llvm::AtomicOrdering::Release: 5106 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5107 llvm::AtomicOrdering::Release); 5108 break; 5109 case llvm::AtomicOrdering::Acquire: 5110 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5111 llvm::AtomicOrdering::Acquire); 5112 break; 5113 case llvm::AtomicOrdering::AcquireRelease: 5114 case llvm::AtomicOrdering::SequentiallyConsistent: 5115 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5116 llvm::AtomicOrdering::AcquireRelease); 5117 break; 5118 case llvm::AtomicOrdering::Monotonic: 5119 break; 5120 case llvm::AtomicOrdering::NotAtomic: 5121 case llvm::AtomicOrdering::Unordered: 5122 llvm_unreachable("Unexpected ordering."); 5123 } 5124 } 5125 5126 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 5127 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 5128 const Expr *X, const Expr *V, const Expr *E, 5129 const Expr *UE, bool IsXLHSInRHSPart, 5130 SourceLocation Loc) { 5131 switch (Kind) { 5132 case OMPC_read: 5133 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 5134 break; 5135 case OMPC_write: 5136 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 5137 break; 5138 case OMPC_unknown: 5139 case OMPC_update: 5140 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 5141 break; 5142 case OMPC_capture: 5143 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 5144 IsXLHSInRHSPart, Loc); 5145 break; 5146 case OMPC_if: 5147 case OMPC_final: 5148 case OMPC_num_threads: 5149 case OMPC_private: 5150 case OMPC_firstprivate: 5151 case OMPC_lastprivate: 5152 case OMPC_reduction: 5153 case OMPC_task_reduction: 5154 case OMPC_in_reduction: 5155 case OMPC_safelen: 5156 case OMPC_simdlen: 5157 case OMPC_allocator: 5158 case OMPC_allocate: 5159 case OMPC_collapse: 5160 case OMPC_default: 5161 case OMPC_seq_cst: 5162 case OMPC_acq_rel: 5163 case OMPC_acquire: 5164 
case OMPC_release: 5165 case OMPC_relaxed: 5166 case OMPC_shared: 5167 case OMPC_linear: 5168 case OMPC_aligned: 5169 case OMPC_copyin: 5170 case OMPC_copyprivate: 5171 case OMPC_flush: 5172 case OMPC_depobj: 5173 case OMPC_proc_bind: 5174 case OMPC_schedule: 5175 case OMPC_ordered: 5176 case OMPC_nowait: 5177 case OMPC_untied: 5178 case OMPC_threadprivate: 5179 case OMPC_depend: 5180 case OMPC_mergeable: 5181 case OMPC_device: 5182 case OMPC_threads: 5183 case OMPC_simd: 5184 case OMPC_map: 5185 case OMPC_num_teams: 5186 case OMPC_thread_limit: 5187 case OMPC_priority: 5188 case OMPC_grainsize: 5189 case OMPC_nogroup: 5190 case OMPC_num_tasks: 5191 case OMPC_hint: 5192 case OMPC_dist_schedule: 5193 case OMPC_defaultmap: 5194 case OMPC_uniform: 5195 case OMPC_to: 5196 case OMPC_from: 5197 case OMPC_use_device_ptr: 5198 case OMPC_use_device_addr: 5199 case OMPC_is_device_ptr: 5200 case OMPC_unified_address: 5201 case OMPC_unified_shared_memory: 5202 case OMPC_reverse_offload: 5203 case OMPC_dynamic_allocators: 5204 case OMPC_atomic_default_mem_order: 5205 case OMPC_device_type: 5206 case OMPC_match: 5207 case OMPC_nontemporal: 5208 case OMPC_order: 5209 case OMPC_destroy: 5210 case OMPC_detach: 5211 case OMPC_inclusive: 5212 case OMPC_exclusive: 5213 case OMPC_uses_allocators: 5214 case OMPC_affinity: 5215 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 5216 } 5217 } 5218 5219 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 5220 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic; 5221 bool MemOrderingSpecified = false; 5222 if (S.getSingleClause<OMPSeqCstClause>()) { 5223 AO = llvm::AtomicOrdering::SequentiallyConsistent; 5224 MemOrderingSpecified = true; 5225 } else if (S.getSingleClause<OMPAcqRelClause>()) { 5226 AO = llvm::AtomicOrdering::AcquireRelease; 5227 MemOrderingSpecified = true; 5228 } else if (S.getSingleClause<OMPAcquireClause>()) { 5229 AO = llvm::AtomicOrdering::Acquire; 5230 MemOrderingSpecified = true; 5231 } else if (S.getSingleClause<OMPReleaseClause>()) { 5232 AO = llvm::AtomicOrdering::Release; 5233 MemOrderingSpecified = true; 5234 } else if (S.getSingleClause<OMPRelaxedClause>()) { 5235 AO = llvm::AtomicOrdering::Monotonic; 5236 MemOrderingSpecified = true; 5237 } 5238 OpenMPClauseKind Kind = OMPC_unknown; 5239 for (const OMPClause *C : S.clauses()) { 5240 // Find first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause, 5241 // if it is first).
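// For example, for '#pragma omp atomic capture seq_cst' (in either clause
// order) this loop sets Kind to OMPC_capture and skips the memory-order
// clause.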
5242 if (C->getClauseKind() != OMPC_seq_cst && 5243 C->getClauseKind() != OMPC_acq_rel && 5244 C->getClauseKind() != OMPC_acquire && 5245 C->getClauseKind() != OMPC_release && 5246 C->getClauseKind() != OMPC_relaxed) { 5247 Kind = C->getClauseKind(); 5248 break; 5249 } 5250 } 5251 if (!MemOrderingSpecified) { 5252 llvm::AtomicOrdering DefaultOrder = 5253 CGM.getOpenMPRuntime().getDefaultMemoryOrdering(); 5254 if (DefaultOrder == llvm::AtomicOrdering::Monotonic || 5255 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent || 5256 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease && 5257 Kind == OMPC_capture)) { 5258 AO = DefaultOrder; 5259 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) { 5260 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) { 5261 AO = llvm::AtomicOrdering::Release; 5262 } else if (Kind == OMPC_read) { 5263 assert(Kind == OMPC_read && "Unexpected atomic kind."); 5264 AO = llvm::AtomicOrdering::Acquire; 5265 } 5266 } 5267 } 5268 5269 const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers(); 5270 5271 auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF, 5272 PrePostActionTy &) { 5273 CGF.EmitStopPoint(CS); 5274 emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(), 5275 S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(), 5276 S.getBeginLoc()); 5277 }; 5278 OMPLexicalScope Scope(*this, S, OMPD_unknown); 5279 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 5280 } 5281 5282 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 5283 const OMPExecutableDirective &S, 5284 const RegionCodeGenTy &CodeGen) { 5285 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 5286 CodeGenModule &CGM = CGF.CGM; 5287 5288 // On device emit this construct as inlined code. 5289 if (CGM.getLangOpts().OpenMPIsDevice) { 5290 OMPLexicalScope Scope(CGF, S, OMPD_target); 5291 CGM.getOpenMPRuntime().emitInlinedDirective( 5292 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5293 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 5294 }); 5295 return; 5296 } 5297 5298 auto LPCRegion = 5299 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S); 5300 llvm::Function *Fn = nullptr; 5301 llvm::Constant *FnID = nullptr; 5302 5303 const Expr *IfCond = nullptr; 5304 // Check for the at most one if clause associated with the target region. 5305 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5306 if (C->getNameModifier() == OMPD_unknown || 5307 C->getNameModifier() == OMPD_target) { 5308 IfCond = C->getCondition(); 5309 break; 5310 } 5311 } 5312 5313 // Check if we have any device clause associated with the directive. 5314 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device( 5315 nullptr, OMPC_DEVICE_unknown); 5316 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 5317 Device.setPointerAndInt(C->getDevice(), C->getModifier()); 5318 5319 // Check if we have an if clause whose conditional always evaluates to false 5320 // or if we do not have any targets specified. If so the target region is not 5321 // an offload entry point. 
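// For example, '#pragma omp target if(0)', or a compilation without any
// -fopenmp-targets= triples, makes IsOffloadEntry false below: the region is
// not registered as an offload entry point and execution always falls back to
// the host version.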
5322 bool IsOffloadEntry = true; 5323 if (IfCond) { 5324 bool Val; 5325 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 5326 IsOffloadEntry = false; 5327 } 5328 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5329 IsOffloadEntry = false; 5330 5331 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 5332 StringRef ParentName; 5333 // In case we have Ctors/Dtors we use the complete type variant to produce 5334 // the mangling of the device outlined kernel. 5335 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 5336 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 5337 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 5338 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 5339 else 5340 ParentName = 5341 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 5342 5343 // Emit target region as a standalone region. 5344 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 5345 IsOffloadEntry, CodeGen); 5346 OMPLexicalScope Scope(CGF, S, OMPD_task); 5347 auto &&SizeEmitter = 5348 [IsOffloadEntry](CodeGenFunction &CGF, 5349 const OMPLoopDirective &D) -> llvm::Value * { 5350 if (IsOffloadEntry) { 5351 OMPLoopScope(CGF, D); 5352 // Emit calculation of the iterations count. 5353 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 5354 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 5355 /*isSigned=*/false); 5356 return NumIterations; 5357 } 5358 return nullptr; 5359 }; 5360 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 5361 SizeEmitter); 5362 } 5363 5364 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 5365 PrePostActionTy &Action) { 5366 Action.Enter(CGF); 5367 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5368 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5369 CGF.EmitOMPPrivateClause(S, PrivateScope); 5370 (void)PrivateScope.Privatize(); 5371 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5372 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5373 5374 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 5375 } 5376 5377 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 5378 StringRef ParentName, 5379 const OMPTargetDirective &S) { 5380 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5381 emitTargetRegion(CGF, S, Action); 5382 }; 5383 llvm::Function *Fn; 5384 llvm::Constant *Addr; 5385 // Emit target region as a standalone region. 
5386 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5387 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5388 assert(Fn && Addr && "Target device function emission failed."); 5389 } 5390 5391 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 5392 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5393 emitTargetRegion(CGF, S, Action); 5394 }; 5395 emitCommonOMPTargetDirective(*this, S, CodeGen); 5396 } 5397 5398 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 5399 const OMPExecutableDirective &S, 5400 OpenMPDirectiveKind InnermostKind, 5401 const RegionCodeGenTy &CodeGen) { 5402 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 5403 llvm::Function *OutlinedFn = 5404 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 5405 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 5406 5407 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 5408 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 5409 if (NT || TL) { 5410 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 5411 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 5412 5413 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 5414 S.getBeginLoc()); 5415 } 5416 5417 OMPTeamsScope Scope(CGF, S); 5418 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5419 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5420 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 5421 CapturedVars); 5422 } 5423 5424 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 5425 // Emit teams region as a standalone region. 5426 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5427 Action.Enter(CGF); 5428 OMPPrivateScope PrivateScope(CGF); 5429 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5430 CGF.EmitOMPPrivateClause(S, PrivateScope); 5431 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5432 (void)PrivateScope.Privatize(); 5433 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 5434 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5435 }; 5436 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5437 emitPostUpdateForReductionClause(*this, S, 5438 [](CodeGenFunction &) { return nullptr; }); 5439 } 5440 5441 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5442 const OMPTargetTeamsDirective &S) { 5443 auto *CS = S.getCapturedStmt(OMPD_teams); 5444 Action.Enter(CGF); 5445 // Emit teams region as a standalone region. 
5446 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5447 Action.Enter(CGF); 5448 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5449 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5450 CGF.EmitOMPPrivateClause(S, PrivateScope); 5451 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5452 (void)PrivateScope.Privatize(); 5453 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5454 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5455 CGF.EmitStmt(CS->getCapturedStmt()); 5456 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5457 }; 5458 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 5459 emitPostUpdateForReductionClause(CGF, S, 5460 [](CodeGenFunction &) { return nullptr; }); 5461 } 5462 5463 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 5464 CodeGenModule &CGM, StringRef ParentName, 5465 const OMPTargetTeamsDirective &S) { 5466 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5467 emitTargetTeamsRegion(CGF, Action, S); 5468 }; 5469 llvm::Function *Fn; 5470 llvm::Constant *Addr; 5471 // Emit target region as a standalone region. 5472 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5473 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5474 assert(Fn && Addr && "Target device function emission failed."); 5475 } 5476 5477 void CodeGenFunction::EmitOMPTargetTeamsDirective( 5478 const OMPTargetTeamsDirective &S) { 5479 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5480 emitTargetTeamsRegion(CGF, Action, S); 5481 }; 5482 emitCommonOMPTargetDirective(*this, S, CodeGen); 5483 } 5484 5485 static void 5486 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5487 const OMPTargetTeamsDistributeDirective &S) { 5488 Action.Enter(CGF); 5489 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5490 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5491 }; 5492 5493 // Emit teams region as a standalone region. 5494 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5495 PrePostActionTy &Action) { 5496 Action.Enter(CGF); 5497 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5498 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5499 (void)PrivateScope.Privatize(); 5500 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5501 CodeGenDistribute); 5502 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5503 }; 5504 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 5505 emitPostUpdateForReductionClause(CGF, S, 5506 [](CodeGenFunction &) { return nullptr; }); 5507 } 5508 5509 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 5510 CodeGenModule &CGM, StringRef ParentName, 5511 const OMPTargetTeamsDistributeDirective &S) { 5512 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5513 emitTargetTeamsDistributeRegion(CGF, Action, S); 5514 }; 5515 llvm::Function *Fn; 5516 llvm::Constant *Addr; 5517 // Emit target region as a standalone region. 
5518 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5519 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5520 assert(Fn && Addr && "Target device function emission failed."); 5521 } 5522 5523 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 5524 const OMPTargetTeamsDistributeDirective &S) { 5525 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5526 emitTargetTeamsDistributeRegion(CGF, Action, S); 5527 }; 5528 emitCommonOMPTargetDirective(*this, S, CodeGen); 5529 } 5530 5531 static void emitTargetTeamsDistributeSimdRegion( 5532 CodeGenFunction &CGF, PrePostActionTy &Action, 5533 const OMPTargetTeamsDistributeSimdDirective &S) { 5534 Action.Enter(CGF); 5535 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5536 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5537 }; 5538 5539 // Emit teams region as a standalone region. 5540 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5541 PrePostActionTy &Action) { 5542 Action.Enter(CGF); 5543 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5544 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5545 (void)PrivateScope.Privatize(); 5546 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5547 CodeGenDistribute); 5548 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5549 }; 5550 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 5551 emitPostUpdateForReductionClause(CGF, S, 5552 [](CodeGenFunction &) { return nullptr; }); 5553 } 5554 5555 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 5556 CodeGenModule &CGM, StringRef ParentName, 5557 const OMPTargetTeamsDistributeSimdDirective &S) { 5558 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5559 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 5560 }; 5561 llvm::Function *Fn; 5562 llvm::Constant *Addr; 5563 // Emit target region as a standalone region. 5564 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5565 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5566 assert(Fn && Addr && "Target device function emission failed."); 5567 } 5568 5569 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 5570 const OMPTargetTeamsDistributeSimdDirective &S) { 5571 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5572 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 5573 }; 5574 emitCommonOMPTargetDirective(*this, S, CodeGen); 5575 } 5576 5577 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 5578 const OMPTeamsDistributeDirective &S) { 5579 5580 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5581 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5582 }; 5583 5584 // Emit teams region as a standalone region. 
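// Rough shape of the emitted code for '#pragma omp teams distribute': the
// teams region below is outlined and launched through the teams runtime entry
// point (__kmpc_fork_teams on the host), and the inlined distribute loop then
// assigns each team its portion of the iteration space.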
5585 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5586 PrePostActionTy &Action) { 5587 Action.Enter(CGF); 5588 OMPPrivateScope PrivateScope(CGF); 5589 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5590 (void)PrivateScope.Privatize(); 5591 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5592 CodeGenDistribute); 5593 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5594 }; 5595 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5596 emitPostUpdateForReductionClause(*this, S, 5597 [](CodeGenFunction &) { return nullptr; }); 5598 } 5599 5600 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 5601 const OMPTeamsDistributeSimdDirective &S) { 5602 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5603 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5604 }; 5605 5606 // Emit teams region as a standalone region. 5607 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5608 PrePostActionTy &Action) { 5609 Action.Enter(CGF); 5610 OMPPrivateScope PrivateScope(CGF); 5611 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5612 (void)PrivateScope.Privatize(); 5613 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 5614 CodeGenDistribute); 5615 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5616 }; 5617 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 5618 emitPostUpdateForReductionClause(*this, S, 5619 [](CodeGenFunction &) { return nullptr; }); 5620 } 5621 5622 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 5623 const OMPTeamsDistributeParallelForDirective &S) { 5624 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5625 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5626 S.getDistInc()); 5627 }; 5628 5629 // Emit teams region as a standalone region. 5630 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5631 PrePostActionTy &Action) { 5632 Action.Enter(CGF); 5633 OMPPrivateScope PrivateScope(CGF); 5634 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5635 (void)PrivateScope.Privatize(); 5636 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5637 CodeGenDistribute); 5638 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5639 }; 5640 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 5641 emitPostUpdateForReductionClause(*this, S, 5642 [](CodeGenFunction &) { return nullptr; }); 5643 } 5644 5645 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 5646 const OMPTeamsDistributeParallelForSimdDirective &S) { 5647 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5648 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5649 S.getDistInc()); 5650 }; 5651 5652 // Emit teams region as a standalone region. 
5653 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5654 PrePostActionTy &Action) { 5655 Action.Enter(CGF); 5656 OMPPrivateScope PrivateScope(CGF); 5657 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5658 (void)PrivateScope.Privatize(); 5659 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5660 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5661 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5662 }; 5663 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 5664 CodeGen); 5665 emitPostUpdateForReductionClause(*this, S, 5666 [](CodeGenFunction &) { return nullptr; }); 5667 } 5668 5669 static void emitTargetTeamsDistributeParallelForRegion( 5670 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 5671 PrePostActionTy &Action) { 5672 Action.Enter(CGF); 5673 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5674 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5675 S.getDistInc()); 5676 }; 5677 5678 // Emit teams region as a standalone region. 5679 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5680 PrePostActionTy &Action) { 5681 Action.Enter(CGF); 5682 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5683 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5684 (void)PrivateScope.Privatize(); 5685 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5686 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5687 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5688 }; 5689 5690 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 5691 CodeGenTeams); 5692 emitPostUpdateForReductionClause(CGF, S, 5693 [](CodeGenFunction &) { return nullptr; }); 5694 } 5695 5696 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 5697 CodeGenModule &CGM, StringRef ParentName, 5698 const OMPTargetTeamsDistributeParallelForDirective &S) { 5699 // Emit SPMD target teams distribute parallel for region as a standalone 5700 // region. 5701 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5702 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5703 }; 5704 llvm::Function *Fn; 5705 llvm::Constant *Addr; 5706 // Emit target region as a standalone region. 5707 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5708 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5709 assert(Fn && Addr && "Target device function emission failed."); 5710 } 5711 5712 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 5713 const OMPTargetTeamsDistributeParallelForDirective &S) { 5714 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5715 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5716 }; 5717 emitCommonOMPTargetDirective(*this, S, CodeGen); 5718 } 5719 5720 static void emitTargetTeamsDistributeParallelForSimdRegion( 5721 CodeGenFunction &CGF, 5722 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 5723 PrePostActionTy &Action) { 5724 Action.Enter(CGF); 5725 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5726 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5727 S.getDistInc()); 5728 }; 5729 5730 // Emit teams region as a standalone region. 
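  // The combined 'target teams distribute parallel for [simd]' helpers in
  // this block emit the whole construct as a single region; the device
  // function emitters below outline exactly that region (see the "Emit SPMD
  // ..." comments) so the offloaded kernel can use an SPMD-style lowering.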
5731 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5732 PrePostActionTy &Action) { 5733 Action.Enter(CGF); 5734 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5735 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5736 (void)PrivateScope.Privatize(); 5737 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5738 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5739 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5740 }; 5741 5742 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 5743 CodeGenTeams); 5744 emitPostUpdateForReductionClause(CGF, S, 5745 [](CodeGenFunction &) { return nullptr; }); 5746 } 5747 5748 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 5749 CodeGenModule &CGM, StringRef ParentName, 5750 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5751 // Emit SPMD target teams distribute parallel for simd region as a standalone 5752 // region. 5753 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5754 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5755 }; 5756 llvm::Function *Fn; 5757 llvm::Constant *Addr; 5758 // Emit target region as a standalone region. 5759 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5760 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5761 assert(Fn && Addr && "Target device function emission failed."); 5762 } 5763 5764 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 5765 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5766 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5767 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5768 }; 5769 emitCommonOMPTargetDirective(*this, S, CodeGen); 5770 } 5771 5772 void CodeGenFunction::EmitOMPCancellationPointDirective( 5773 const OMPCancellationPointDirective &S) { 5774 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 5775 S.getCancelRegion()); 5776 } 5777 5778 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 5779 const Expr *IfCond = nullptr; 5780 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5781 if (C->getNameModifier() == OMPD_unknown || 5782 C->getNameModifier() == OMPD_cancel) { 5783 IfCond = C->getCondition(); 5784 break; 5785 } 5786 } 5787 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5788 // TODO: This check is necessary as we only generate `omp parallel` through 5789 // the OpenMPIRBuilder for now. 
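    // For illustration: with the OpenMPIRBuilder enabled, a construct such as
    //   #pragma omp cancel parallel if(cond)
    // is lowered through OMPBuilder->CreateCancel below; other cancel regions
    // still go through the emitCancelCall fallback at the end of this
    // function.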
5790 if (S.getCancelRegion() == OMPD_parallel) { 5791 llvm::Value *IfCondition = nullptr; 5792 if (IfCond) 5793 IfCondition = EmitScalarExpr(IfCond, 5794 /*IgnoreResultAssign=*/true); 5795 return Builder.restoreIP( 5796 OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion())); 5797 } 5798 } 5799 5800 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond, 5801 S.getCancelRegion()); 5802 } 5803 5804 CodeGenFunction::JumpDest 5805 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 5806 if (Kind == OMPD_parallel || Kind == OMPD_task || 5807 Kind == OMPD_target_parallel || Kind == OMPD_taskloop || 5808 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop) 5809 return ReturnBlock; 5810 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections || 5811 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for || 5812 Kind == OMPD_distribute_parallel_for || 5813 Kind == OMPD_target_parallel_for || 5814 Kind == OMPD_teams_distribute_parallel_for || 5815 Kind == OMPD_target_teams_distribute_parallel_for); 5816 return OMPCancelStack.getExitBlock(); 5817 } 5818 5819 void CodeGenFunction::EmitOMPUseDevicePtrClause( 5820 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope, 5821 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 5822 auto OrigVarIt = C.varlist_begin(); 5823 auto InitIt = C.inits().begin(); 5824 for (const Expr *PvtVarIt : C.private_copies()) { 5825 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl()); 5826 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl()); 5827 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl()); 5828 5829 // In order to identify the right initializer we need to match the 5830 // declaration used by the mapping logic. In some cases we may get 5831 // OMPCapturedExprDecl that refers to the original declaration. 5832 const ValueDecl *MatchingVD = OrigVD; 5833 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 5834 // OMPCapturedExprDecl are used to privative fields of the current 5835 // structure. 5836 const auto *ME = cast<MemberExpr>(OED->getInit()); 5837 assert(isa<CXXThisExpr>(ME->getBase()) && 5838 "Base should be the current struct!"); 5839 MatchingVD = ME->getMemberDecl(); 5840 } 5841 5842 // If we don't have information about the current list item, move on to 5843 // the next one. 5844 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 5845 if (InitAddrIt == CaptureDeviceAddrMap.end()) 5846 continue; 5847 5848 bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD, 5849 InitAddrIt, InitVD, 5850 PvtVD]() { 5851 // Initialize the temporary initialization variable with the address we 5852 // get from the runtime library. We have to cast the source address 5853 // because it is always a void *. References are materialized in the 5854 // privatization scope, so the initialization here disregards the fact 5855 // the original variable is a reference. 5856 QualType AddrQTy = 5857 getContext().getPointerType(OrigVD->getType().getNonReferenceType()); 5858 llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy); 5859 Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy); 5860 setAddrOfLocalVar(InitVD, InitAddr); 5861 5862 // Emit private declaration, it will be initialized by the value we 5863 // declaration we just added to the local declarations map. 
5864 EmitDecl(*PvtVD); 5865 5866 // The initialization variables reached its purpose in the emission 5867 // of the previous declaration, so we don't need it anymore. 5868 LocalDeclMap.erase(InitVD); 5869 5870 // Return the address of the private variable. 5871 return GetAddrOfLocalVar(PvtVD); 5872 }); 5873 assert(IsRegistered && "firstprivate var already registered as private"); 5874 // Silence the warning about unused variable. 5875 (void)IsRegistered; 5876 5877 ++OrigVarIt; 5878 ++InitIt; 5879 } 5880 } 5881 5882 static const VarDecl *getBaseDecl(const Expr *Ref) { 5883 const Expr *Base = Ref->IgnoreParenImpCasts(); 5884 while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base)) 5885 Base = OASE->getBase()->IgnoreParenImpCasts(); 5886 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base)) 5887 Base = ASE->getBase()->IgnoreParenImpCasts(); 5888 return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl()); 5889 } 5890 5891 void CodeGenFunction::EmitOMPUseDeviceAddrClause( 5892 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope, 5893 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 5894 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed; 5895 for (const Expr *Ref : C.varlists()) { 5896 const VarDecl *OrigVD = getBaseDecl(Ref); 5897 if (!Processed.insert(OrigVD).second) 5898 continue; 5899 // In order to identify the right initializer we need to match the 5900 // declaration used by the mapping logic. In some cases we may get 5901 // OMPCapturedExprDecl that refers to the original declaration. 5902 const ValueDecl *MatchingVD = OrigVD; 5903 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) { 5904 // OMPCapturedExprDecl are used to privative fields of the current 5905 // structure. 5906 const auto *ME = cast<MemberExpr>(OED->getInit()); 5907 assert(isa<CXXThisExpr>(ME->getBase()) && 5908 "Base should be the current struct!"); 5909 MatchingVD = ME->getMemberDecl(); 5910 } 5911 5912 // If we don't have information about the current list item, move on to 5913 // the next one. 5914 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD); 5915 if (InitAddrIt == CaptureDeviceAddrMap.end()) 5916 continue; 5917 5918 Address PrivAddr = InitAddrIt->getSecond(); 5919 // For declrefs and variable length array need to load the pointer for 5920 // correct mapping, since the pointer to the data was passed to the runtime. 5921 if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) || 5922 MatchingVD->getType()->isArrayType()) 5923 PrivAddr = 5924 EmitLoadOfPointer(PrivAddr, getContext() 5925 .getPointerType(OrigVD->getType()) 5926 ->castAs<PointerType>()); 5927 llvm::Type *RealTy = 5928 ConvertTypeForMem(OrigVD->getType().getNonReferenceType()) 5929 ->getPointerTo(); 5930 PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy); 5931 5932 (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; }); 5933 } 5934 } 5935 5936 // Generate the instructions for '#pragma omp target data' directive. 5937 void CodeGenFunction::EmitOMPTargetDataDirective( 5938 const OMPTargetDataDirective &S) { 5939 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true); 5940 5941 // Create a pre/post action to signal the privatization of the device pointer. 5942 // This action can be replaced by the OpenMP runtime code generation to 5943 // deactivate privatization. 
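  // For illustration: privatization matters for a construct such as
  //   #pragma omp target data map(to : a[:n]) use_device_ptr(a)
  // where the region body must see 'a' as the device address returned by the
  // runtime; the action below is how the runtime codegen opts in to that
  // rewriting.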
5944 bool PrivatizeDevicePointers = false; 5945 class DevicePointerPrivActionTy : public PrePostActionTy { 5946 bool &PrivatizeDevicePointers; 5947 5948 public: 5949 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers) 5950 : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {} 5951 void Enter(CodeGenFunction &CGF) override { 5952 PrivatizeDevicePointers = true; 5953 } 5954 }; 5955 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers); 5956 5957 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers]( 5958 CodeGenFunction &CGF, PrePostActionTy &Action) { 5959 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5960 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 5961 }; 5962 5963 // Codegen that selects whether to generate the privatization code or not. 5964 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers, 5965 &InnermostCodeGen](CodeGenFunction &CGF, 5966 PrePostActionTy &Action) { 5967 RegionCodeGenTy RCG(InnermostCodeGen); 5968 PrivatizeDevicePointers = false; 5969 5970 // Call the pre-action to change the status of PrivatizeDevicePointers if 5971 // needed. 5972 Action.Enter(CGF); 5973 5974 if (PrivatizeDevicePointers) { 5975 OMPPrivateScope PrivateScope(CGF); 5976 // Emit all instances of the use_device_ptr clause. 5977 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>()) 5978 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope, 5979 Info.CaptureDeviceAddrMap); 5980 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>()) 5981 CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope, 5982 Info.CaptureDeviceAddrMap); 5983 (void)PrivateScope.Privatize(); 5984 RCG(CGF); 5985 } else { 5986 RCG(CGF); 5987 } 5988 }; 5989 5990 // Forward the provided action to the privatization codegen. 5991 RegionCodeGenTy PrivRCG(PrivCodeGen); 5992 PrivRCG.setAction(Action); 5993 5994 // Notwithstanding the body of the region is emitted as inlined directive, 5995 // we don't use an inline scope as changes in the references inside the 5996 // region are expected to be visible outside, so we do not privative them. 5997 OMPLexicalScope Scope(CGF, S); 5998 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data, 5999 PrivRCG); 6000 }; 6001 6002 RegionCodeGenTy RCG(CodeGen); 6003 6004 // If we don't have target devices, don't bother emitting the data mapping 6005 // code. 6006 if (CGM.getLangOpts().OMPTargetTriples.empty()) { 6007 RCG(*this); 6008 return; 6009 } 6010 6011 // Check if we have any if clause associated with the directive. 6012 const Expr *IfCond = nullptr; 6013 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6014 IfCond = C->getCondition(); 6015 6016 // Check if we have any device clause associated with the directive. 6017 const Expr *Device = nullptr; 6018 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6019 Device = C->getDevice(); 6020 6021 // Set the action to signal privatization of device pointers. 6022 RCG.setAction(PrivAction); 6023 6024 // Emit region code. 6025 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG, 6026 Info); 6027 } 6028 6029 void CodeGenFunction::EmitOMPTargetEnterDataDirective( 6030 const OMPTargetEnterDataDirective &S) { 6031 // If we don't have target devices, don't bother emitting the data mapping 6032 // code. 6033 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6034 return; 6035 6036 // Check if we have any if clause associated with the directive. 
6037 const Expr *IfCond = nullptr; 6038 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6039 IfCond = C->getCondition(); 6040 6041 // Check if we have any device clause associated with the directive. 6042 const Expr *Device = nullptr; 6043 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6044 Device = C->getDevice(); 6045 6046 OMPLexicalScope Scope(*this, S, OMPD_task); 6047 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6048 } 6049 6050 void CodeGenFunction::EmitOMPTargetExitDataDirective( 6051 const OMPTargetExitDataDirective &S) { 6052 // If we don't have target devices, don't bother emitting the data mapping 6053 // code. 6054 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6055 return; 6056 6057 // Check if we have any if clause associated with the directive. 6058 const Expr *IfCond = nullptr; 6059 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6060 IfCond = C->getCondition(); 6061 6062 // Check if we have any device clause associated with the directive. 6063 const Expr *Device = nullptr; 6064 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6065 Device = C->getDevice(); 6066 6067 OMPLexicalScope Scope(*this, S, OMPD_task); 6068 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6069 } 6070 6071 static void emitTargetParallelRegion(CodeGenFunction &CGF, 6072 const OMPTargetParallelDirective &S, 6073 PrePostActionTy &Action) { 6074 // Get the captured statement associated with the 'parallel' region. 6075 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6076 Action.Enter(CGF); 6077 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6078 Action.Enter(CGF); 6079 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6080 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6081 CGF.EmitOMPPrivateClause(S, PrivateScope); 6082 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6083 (void)PrivateScope.Privatize(); 6084 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6085 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6086 // TODO: Add support for clauses. 6087 CGF.EmitStmt(CS->getCapturedStmt()); 6088 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6089 }; 6090 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6091 emitEmptyBoundParameters); 6092 emitPostUpdateForReductionClause(CGF, S, 6093 [](CodeGenFunction &) { return nullptr; }); 6094 } 6095 6096 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6097 CodeGenModule &CGM, StringRef ParentName, 6098 const OMPTargetParallelDirective &S) { 6099 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6100 emitTargetParallelRegion(CGF, S, Action); 6101 }; 6102 llvm::Function *Fn; 6103 llvm::Constant *Addr; 6104 // Emit target region as a standalone region. 
6105 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6106 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6107 assert(Fn && Addr && "Target device function emission failed."); 6108 } 6109 6110 void CodeGenFunction::EmitOMPTargetParallelDirective( 6111 const OMPTargetParallelDirective &S) { 6112 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6113 emitTargetParallelRegion(CGF, S, Action); 6114 }; 6115 emitCommonOMPTargetDirective(*this, S, CodeGen); 6116 } 6117 6118 static void emitTargetParallelForRegion(CodeGenFunction &CGF, 6119 const OMPTargetParallelForDirective &S, 6120 PrePostActionTy &Action) { 6121 Action.Enter(CGF); 6122 // Emit directive as a combined directive that consists of two implicit 6123 // directives: 'parallel' with 'for' directive. 6124 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6125 Action.Enter(CGF); 6126 CodeGenFunction::OMPCancelStackRAII CancelRegion( 6127 CGF, OMPD_target_parallel_for, S.hasCancel()); 6128 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6129 emitDispatchForLoopBounds); 6130 }; 6131 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen, 6132 emitEmptyBoundParameters); 6133 } 6134 6135 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 6136 CodeGenModule &CGM, StringRef ParentName, 6137 const OMPTargetParallelForDirective &S) { 6138 // Emit SPMD target parallel for region as a standalone region. 6139 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6140 emitTargetParallelForRegion(CGF, S, Action); 6141 }; 6142 llvm::Function *Fn; 6143 llvm::Constant *Addr; 6144 // Emit target region as a standalone region. 6145 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6146 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6147 assert(Fn && Addr && "Target device function emission failed."); 6148 } 6149 6150 void CodeGenFunction::EmitOMPTargetParallelForDirective( 6151 const OMPTargetParallelForDirective &S) { 6152 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6153 emitTargetParallelForRegion(CGF, S, Action); 6154 }; 6155 emitCommonOMPTargetDirective(*this, S, CodeGen); 6156 } 6157 6158 static void 6159 emitTargetParallelForSimdRegion(CodeGenFunction &CGF, 6160 const OMPTargetParallelForSimdDirective &S, 6161 PrePostActionTy &Action) { 6162 Action.Enter(CGF); 6163 // Emit directive as a combined directive that consists of two implicit 6164 // directives: 'parallel' with 'for' directive. 6165 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6166 Action.Enter(CGF); 6167 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 6168 emitDispatchForLoopBounds); 6169 }; 6170 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen, 6171 emitEmptyBoundParameters); 6172 } 6173 6174 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 6175 CodeGenModule &CGM, StringRef ParentName, 6176 const OMPTargetParallelForSimdDirective &S) { 6177 // Emit SPMD target parallel for region as a standalone region. 6178 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6179 emitTargetParallelForSimdRegion(CGF, S, Action); 6180 }; 6181 llvm::Function *Fn; 6182 llvm::Constant *Addr; 6183 // Emit target region as a standalone region. 
6184 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 6185 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 6186 assert(Fn && Addr && "Target device function emission failed."); 6187 } 6188 6189 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective( 6190 const OMPTargetParallelForSimdDirective &S) { 6191 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6192 emitTargetParallelForSimdRegion(CGF, S, Action); 6193 }; 6194 emitCommonOMPTargetDirective(*this, S, CodeGen); 6195 } 6196 6197 /// Emit a helper variable and return corresponding lvalue. 6198 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, 6199 const ImplicitParamDecl *PVD, 6200 CodeGenFunction::OMPPrivateScope &Privates) { 6201 const auto *VDecl = cast<VarDecl>(Helper->getDecl()); 6202 Privates.addPrivate(VDecl, 6203 [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); }); 6204 } 6205 6206 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) { 6207 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind())); 6208 // Emit outlined function for task construct. 6209 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop); 6210 Address CapturedStruct = Address::invalid(); 6211 { 6212 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6213 CapturedStruct = GenerateCapturedStmtArgument(*CS); 6214 } 6215 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 6216 const Expr *IfCond = nullptr; 6217 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 6218 if (C->getNameModifier() == OMPD_unknown || 6219 C->getNameModifier() == OMPD_taskloop) { 6220 IfCond = C->getCondition(); 6221 break; 6222 } 6223 } 6224 6225 OMPTaskDataTy Data; 6226 // Check if taskloop must be emitted without taskgroup. 6227 Data.Nogroup = S.getSingleClause<OMPNogroupClause>(); 6228 // TODO: Check if we should emit tied or untied task. 6229 Data.Tied = true; 6230 // Set scheduling for taskloop 6231 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) { 6232 // grainsize clause 6233 Data.Schedule.setInt(/*IntVal=*/false); 6234 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 6235 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) { 6236 // num_tasks clause 6237 Data.Schedule.setInt(/*IntVal=*/true); 6238 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 6239 } 6240 6241 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 6242 // if (PreCond) { 6243 // for (IV in 0..LastIteration) BODY; 6244 // <Final counter/linear vars updates>; 6245 // } 6246 // 6247 6248 // Emit: if (PreCond) - begin. 6249 // If the condition constant folds and can be elided, avoid emitting the 6250 // whole loop. 6251 bool CondConstant; 6252 llvm::BasicBlock *ContBlock = nullptr; 6253 OMPLoopScope PreInitScope(CGF, S); 6254 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 6255 if (!CondConstant) 6256 return; 6257 } else { 6258 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 6259 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 6260 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 6261 CGF.getProfileCount(&S)); 6262 CGF.EmitBlock(ThenBlock); 6263 CGF.incrementProfileCounter(&S); 6264 } 6265 6266 (void)CGF.EmitOMPLinearClauseInit(S); 6267 6268 OMPPrivateScope LoopScope(CGF); 6269 // Emit helper vars inits. 
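    // For illustration: the captured declaration for the taskloop carries the
    // lower bound, upper bound, stride and last-iteration flag as implicit
    // parameters at the fixed positions named by the enum below; mapParam
    // binds the directive's helper variables to those parameters.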
6270 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6271 auto *I = CS->getCapturedDecl()->param_begin(); 6272 auto *LBP = std::next(I, LowerBound); 6273 auto *UBP = std::next(I, UpperBound); 6274 auto *STP = std::next(I, Stride); 6275 auto *LIP = std::next(I, LastIter); 6276 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6277 LoopScope); 6278 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6279 LoopScope); 6280 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6281 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6282 LoopScope); 6283 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6284 CGF.EmitOMPLinearClause(S, LoopScope); 6285 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6286 (void)LoopScope.Privatize(); 6287 // Emit the loop iteration variable. 6288 const Expr *IVExpr = S.getIterationVariable(); 6289 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6290 CGF.EmitVarDecl(*IVDecl); 6291 CGF.EmitIgnoredExpr(S.getInit()); 6292 6293 // Emit the iterations count variable. 6294 // If it is not a variable, Sema decided to calculate iterations count on 6295 // each iteration (e.g., it is foldable into a constant). 6296 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6297 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6298 // Emit calculation of the iterations count. 6299 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6300 } 6301 6302 { 6303 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6304 emitCommonSimdLoop( 6305 CGF, S, 6306 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6307 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6308 CGF.EmitOMPSimdInit(S); 6309 }, 6310 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6311 CGF.EmitOMPInnerLoop( 6312 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6313 [&S](CodeGenFunction &CGF) { 6314 emitOMPLoopBodyWithStopPoint(CGF, S, 6315 CodeGenFunction::JumpDest()); 6316 }, 6317 [](CodeGenFunction &) {}); 6318 }); 6319 } 6320 // Emit: if (PreCond) - end. 6321 if (ContBlock) { 6322 CGF.EmitBranch(ContBlock); 6323 CGF.EmitBlock(ContBlock, true); 6324 } 6325 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
6326 if (HasLastprivateClause) { 6327 CGF.EmitOMPLastprivateClauseFinal( 6328 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6329 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6330 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6331 (*LIP)->getType(), S.getBeginLoc()))); 6332 } 6333 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6334 return CGF.Builder.CreateIsNotNull( 6335 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6336 (*LIP)->getType(), S.getBeginLoc())); 6337 }); 6338 }; 6339 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6340 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6341 const OMPTaskDataTy &Data) { 6342 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6343 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6344 OMPLoopScope PreInitScope(CGF, S); 6345 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6346 OutlinedFn, SharedsTy, 6347 CapturedStruct, IfCond, Data); 6348 }; 6349 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6350 CodeGen); 6351 }; 6352 if (Data.Nogroup) { 6353 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6354 } else { 6355 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6356 *this, 6357 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6358 PrePostActionTy &Action) { 6359 Action.Enter(CGF); 6360 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6361 Data); 6362 }, 6363 S.getBeginLoc()); 6364 } 6365 } 6366 6367 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6368 auto LPCRegion = 6369 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6370 EmitOMPTaskLoopBasedDirective(S); 6371 } 6372 6373 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6374 const OMPTaskLoopSimdDirective &S) { 6375 auto LPCRegion = 6376 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6377 OMPLexicalScope Scope(*this, S); 6378 EmitOMPTaskLoopBasedDirective(S); 6379 } 6380 6381 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6382 const OMPMasterTaskLoopDirective &S) { 6383 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6384 Action.Enter(CGF); 6385 EmitOMPTaskLoopBasedDirective(S); 6386 }; 6387 auto LPCRegion = 6388 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6389 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 6390 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6391 } 6392 6393 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 6394 const OMPMasterTaskLoopSimdDirective &S) { 6395 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6396 Action.Enter(CGF); 6397 EmitOMPTaskLoopBasedDirective(S); 6398 }; 6399 auto LPCRegion = 6400 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6401 OMPLexicalScope Scope(*this, S); 6402 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6403 } 6404 6405 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 6406 const OMPParallelMasterTaskLoopDirective &S) { 6407 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6408 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6409 PrePostActionTy &Action) { 6410 Action.Enter(CGF); 6411 CGF.EmitOMPTaskLoopBasedDirective(S); 6412 }; 6413 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6414 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6415 S.getBeginLoc()); 
6416 }; 6417 auto LPCRegion = 6418 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6419 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 6420 emitEmptyBoundParameters); 6421 } 6422 6423 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 6424 const OMPParallelMasterTaskLoopSimdDirective &S) { 6425 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6426 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6427 PrePostActionTy &Action) { 6428 Action.Enter(CGF); 6429 CGF.EmitOMPTaskLoopBasedDirective(S); 6430 }; 6431 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6432 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6433 S.getBeginLoc()); 6434 }; 6435 auto LPCRegion = 6436 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6437 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 6438 emitEmptyBoundParameters); 6439 } 6440 6441 // Generate the instructions for '#pragma omp target update' directive. 6442 void CodeGenFunction::EmitOMPTargetUpdateDirective( 6443 const OMPTargetUpdateDirective &S) { 6444 // If we don't have target devices, don't bother emitting the data mapping 6445 // code. 6446 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6447 return; 6448 6449 // Check if we have any if clause associated with the directive. 6450 const Expr *IfCond = nullptr; 6451 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6452 IfCond = C->getCondition(); 6453 6454 // Check if we have any device clause associated with the directive. 6455 const Expr *Device = nullptr; 6456 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6457 Device = C->getDevice(); 6458 6459 OMPLexicalScope Scope(*this, S, OMPD_task); 6460 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6461 } 6462 6463 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 6464 const OMPExecutableDirective &D) { 6465 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 6466 EmitOMPScanDirective(*SD); 6467 return; 6468 } 6469 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 6470 return; 6471 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 6472 OMPPrivateScope GlobalsScope(CGF); 6473 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 6474 // Capture global firstprivates to avoid crash. 
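      // For illustration: a global listed in firstprivate on a tasking
      // directive has no LocalDeclMap entry when the task body is emitted, so
      // its address is registered in GlobalsScope here; the loop below skips
      // anything with local storage or an existing local mapping.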
6475 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) { 6476 for (const Expr *Ref : C->varlists()) { 6477 const auto *DRE = cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 6478 if (!DRE) 6479 continue; 6480 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()); 6481 if (!VD || VD->hasLocalStorage()) 6482 continue; 6483 if (!CGF.LocalDeclMap.count(VD)) { 6484 LValue GlobLVal = CGF.EmitLValue(Ref); 6485 GlobalsScope.addPrivate( 6486 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 6487 } 6488 } 6489 } 6490 } 6491 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 6492 (void)GlobalsScope.Privatize(); 6493 ParentLoopDirectiveForScanRegion ScanRegion(CGF, D); 6494 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action); 6495 } else { 6496 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) { 6497 for (const Expr *E : LD->counters()) { 6498 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 6499 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) { 6500 LValue GlobLVal = CGF.EmitLValue(E); 6501 GlobalsScope.addPrivate( 6502 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); }); 6503 } 6504 if (isa<OMPCapturedExprDecl>(VD)) { 6505 // Emit only those that were not explicitly referenced in clauses. 6506 if (!CGF.LocalDeclMap.count(VD)) 6507 CGF.EmitVarDecl(*VD); 6508 } 6509 } 6510 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) { 6511 if (!C->getNumForLoops()) 6512 continue; 6513 for (unsigned I = LD->getCollapsedNumber(), 6514 E = C->getLoopNumIterations().size(); 6515 I < E; ++I) { 6516 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>( 6517 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) { 6518 // Emit only those that were not explicitly referenced in clauses. 6519 if (!CGF.LocalDeclMap.count(VD)) 6520 CGF.EmitVarDecl(*VD); 6521 } 6522 } 6523 } 6524 } 6525 (void)GlobalsScope.Privatize(); 6526 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt()); 6527 } 6528 }; 6529 { 6530 auto LPCRegion = 6531 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D); 6532 OMPSimdLexicalScope Scope(*this, D); 6533 CGM.getOpenMPRuntime().emitInlinedDirective( 6534 *this, 6535 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd 6536 : D.getDirectiveKind(), 6537 CodeGen); 6538 } 6539 // Check for outer lastprivate conditional update. 6540 checkForLastprivateConditionalUpdate(*this, D); 6541 } 6542