//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion.hasValue())
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
          return CGF.EmitLValue(&DRE).getAddress(CGF);
        });
      }
    }
    (void)InlinedShareds.Privatize();
  }
};
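
// Illustration (assumed example, not from the original sources): for
//   #pragma omp parallel num_threads(x + y)
// Sema may capture `x + y` in an OMPCapturedExprDecl attached to the clause as
// a pre-init statement; emitPreInitStmt() above materializes that temporary
// once, in the enclosing frame, before any outlined region is entered.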

/// Lexical scope for an OpenMP parallel construct that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for an OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};
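
// Gating example (illustrative): for a combined construct like
//   #pragma omp target teams distribute parallel for
// isOpenMPTargetExecutionDirective() is true, so both scopes above return
// false from EmitPreInitStmt(): the pre-init statements are emitted once by
// the scope of the enclosing target region rather than being re-emitted here.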

/// Private scope for OpenMP loop-based directives that supports capturing
/// of used expressions from the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    CodeGenFunction::OMPMapVars PreCondVars;
    llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
    for (const auto *E : S.counters()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      EmittedAsPrivate.insert(VD->getCanonicalDecl());
      (void)PreCondVars.setVarAddr(
          CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
    }
    // Mark private vars as undefs.
    for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
      for (const Expr *IRef : C->varlists()) {
        const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
          (void)PreCondVars.setVarAddr(
              CGF, OrigVD,
              Address(llvm::UndefValue::get(
                          CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
                              OrigVD->getType().getNonReferenceType()))),
                      CGF.getContext().getDeclAlign(OrigVD)));
        }
      }
    }
    (void)PreCondVars.apply(CGF);
    // Emit the init, __range, and __end variables for C++ range-based for
    // loops.
    const Stmt *Body =
        S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
      Body = OMPLoopDirective::tryToFindNextInnerLoop(
          Body, /*TryImperfectlyNestedLoops=*/true);
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        auto *CXXFor = cast<CXXForRangeStmt>(Body);
        if (const Stmt *Init = CXXFor->getInit())
          CGF.EmitStmt(Init);
        CGF.EmitStmt(CXXFor->getRangeStmt());
        CGF.EmitStmt(CXXFor->getEndStmt());
        Body = CXXFor->getBody();
      }
    }
    if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};
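
// Walk-through (illustrative): for
//   #pragma omp for collapse(2)
//   for (int i = 0; i < n; ++i)
//     for (auto v : vec) { ... }
// the loop above visits both collapsed levels; for the inner range-based for
// it emits the init statement plus the hidden __range and __end variables so
// that the combined iteration count can be computed before the construct.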

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
            return CGF.EmitLValue(&DRE).getAddress(CGF);
          });
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace
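
// Note (illustrative): the use_device_ptr/use_device_addr handling above
// matters for directives such as
//   #pragma omp target data use_device_ptr(p)
// where the translated device pointer is bound through an OMPCapturedExprDecl
// that must be emitted before the associated statement is generated.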

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
                  : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}
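
// Worked example for getTypeSize() (illustrative): for a VLA of type
// `int[n][m]`, getTypeSizeInChars() is zero, so the loop above peels both
// VariableArrayType levels and the result is computed roughly as
//   %0 = mul nuw i64 %n, %m   ; element count
//   %1 = mul nuw i64 %0, 4    ; times sizeof(int) in chars
// The multiplications are NUW because array extents are non-negative.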

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress(CGF);
  return TmpAddr;
}
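
// Round-trip sketch (illustrative): a `float f` captured by copy is stored
// into an `f.casted` uintptr_t temporary through a pointer cast to the source
// type, then reloaded as uintptr_t for the runtime call; on the callee side
// castValueFromUintptr() performs the inverse reinterpretation. This dance
// exists because the runtime only forwards pointer-sized arguments.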

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. The VLA type sizes can be passed to the outlined
    // function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}
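
// Resulting signature sketch (illustrative; the exact IR may differ): for a
// region capturing `int a` by copy, `int *p`, and one VLA dimension, the
// outlined function looks roughly like
//   define internal void @.omp_outlined.(i32* %.global_tid.,
//       i32* %.bound_tid., i64 %a, i32* %p, i64 %vla)
// with %a and %vla widened to uintptr because UIntPtrCastRequired is set.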

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
        return LocalAddrPair.second.second;
      });
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(),
                                              AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}
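
// Naming note (illustrative): with debug info enabled the body lands in a
// "<helper>_debug__" function that keeps the original parameter types, while
// the "<helper>" wrapper emitted above re-casts its uintptr arguments and
// forwards them, so debuggers see naturally typed locals without changing the
// runtime's pointer-sized calling convention.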

//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
            Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}
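
// Strategy example (illustrative): for
//   struct S { S &operator=(const S &); };
//   S arr[8];
// the copy expression is an operator call rather than a simple BO_Assign, so
// EmitOMPAggregateAssign() drives the omp.arraycpy.* loop invoking operator=
// per element; an array of PODs takes the EmitAggregateAssign() memcpy path.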

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function (e.g. omp for, omp simd, omp distribute).
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit a copy for firstprivate constant variables captured by
      // reference in target regions.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
                                                                    OrigVD);
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(
              OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
                AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
                const Expr *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  LValue Dest =
                      MakeAddrLValue(Emission.getAllocatedAddress(), Type);
                  EmitAggregateAssign(Dest, OriginalLVal, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(),
                      OriginalLVal.getAddress(*this), Type,
                      [this, VDInit, Init](Address DestElement,
                                           Address SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        setAddrOfLocalVar(VDInit, SrcElement);
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
                                               ThisFirstprivateIsLastprivate,
                                               OrigVD, &Lastprivates, IRef]() {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                setAddrOfLocalVar(VDInit, OriginalAddr);
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                if (ThisFirstprivateIsLastprivate &&
                    Lastprivates[OrigVD->getCanonicalDecl()] ==
                        OMPC_LASTPRIVATE_conditional) {
                  // Create/init special variable for lastprivate conditionals.
                  Address VDAddr =
                      CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                          *this, OrigVD);
                  llvm::Value *V = EmitLoadOfScalar(
                      MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
                                     AlignmentSource::Decl),
                      (*IRef)->getExprLoc());
                  EmitStoreOfScalar(V,
                                    MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                   AlignmentSource::Decl));
                  LocalDeclMap.erase(VD);
                  setAddrOfLocalVar(VD, VDAddr);
                  return VDAddr;
                }
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}
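
// Behavior notes (illustrative): for a constant captured by reference in a
// target region, e.g.
//   const int k = 42;
//   #pragma omp target firstprivate(k)
// no local copy is emitted; the value is registered through
// registerTargetFirstprivateCopy() instead. A firstprivate that is also
// lastprivate(conditional:) additionally receives the runtime-tracked copy
// created by emitLastprivateConditionalInit().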

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
          // Emit private VarDecl with copy init.
          EmitDecl(*VD);
          return GetAddrOfLocalVar(VD);
        });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}
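
// For example (illustrative):
//   int i;
//   #pragma omp parallel private(i)
// emits a fresh, uninitialized private `i` via EmitDecl() on the
// private_copies() declaration and remaps uses through PrivateScope.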

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy the data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
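
// Shape of the emitted copyin code (sketch):
//   if ((uintptr_t)&master_var != (uintptr_t)&threadprivate_var) {
//     threadprivate_var = master_var; // or operator= for class types
//     ...
//   } // copyin.not.master.end
// The address comparison lets the master thread skip copying onto itself.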

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done by the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                          /*RefersToEnclosingVariableOrCapture=*/
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress(*this);
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
                                                              OrigVD]() {
            if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
              Address VDAddr =
                  CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                      *this, OrigVD);
              setAddrOfLocalVar(VD, VDAddr);
              return VDAddr;
            }
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}
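
// Note (illustrative): for
//   #pragma omp for lastprivate(conditional: x)
// the private copy is created through emitLastprivateConditionalInit() so the
// runtime can track which iteration wrote x last; plain lastprivates are
// ordinary private allocas, skipped here only when they are loop counters of
// a simd construct (those are handled by the simd codegen itself).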

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit an implicit barrier if at least one lastprivate conditional is
    // found and this is not simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable of a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}
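
// Counter interplay (illustrative): for
//   #pragma omp for lastprivate(i)
//   for (i = 0; i < n; ++i) { ... }
// the final expression for the loop counter is emitted right before the
// copy-out, so the original `i` receives the value it would hold after an
// equivalent sequential execution of the loop.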

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
        return RedCG.getSharedLValue(Count).getAddress(*this);
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
        return IsArray ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
      });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive with task reductions.");
    }

    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
    EmitVarDecl(*VD);
    EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
                      /*Volatile=*/false, TaskRedRef->getType());
  }
}
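
// For example (illustrative):
//   #pragma omp parallel reduction(task, +: s)
// allocates the private copy of s initialized with the identity of + (zero),
// and, because of the task modifier, also emits the task-reduction descriptor
// via emitTaskReductionInit() and stores it into the implicit variable named
// by getTaskReductionRefExpr() for use by nested tasks.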

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}
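
// Note (illustrative): `#pragma omp parallel for reduction(+: s) nowait`
// reaches emitReduction() with WithNowait = true; a simd reduction
// (ReductionKind == OMPD_simd) is "simple" and needs no runtime combining at
// all, since all simd lanes of a thread accumulate into that thread's single
// private copy.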

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit the conditional
          // block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to the
/// outlined parallel function. This is necessary for combined constructs such
/// as 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace
This is necessary for combined constructs such as 1457 /// 'distribute parallel for'. 1458 typedef llvm::function_ref<void(CodeGenFunction &, 1459 const OMPExecutableDirective &, 1460 llvm::SmallVectorImpl<llvm::Value *> &)> 1461 CodeGenBoundParametersTy; 1462 } // anonymous namespace 1463 1464 static void 1465 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF, 1466 const OMPExecutableDirective &S) { 1467 if (CGF.getLangOpts().OpenMP < 50) 1468 return; 1469 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls; 1470 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 1471 for (const Expr *Ref : C->varlists()) { 1472 if (!Ref->getType()->isScalarType()) 1473 continue; 1474 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1475 if (!DRE) 1476 continue; 1477 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1478 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1479 } 1480 } 1481 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 1482 for (const Expr *Ref : C->varlists()) { 1483 if (!Ref->getType()->isScalarType()) 1484 continue; 1485 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1486 if (!DRE) 1487 continue; 1488 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1489 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1490 } 1491 } 1492 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) { 1493 for (const Expr *Ref : C->varlists()) { 1494 if (!Ref->getType()->isScalarType()) 1495 continue; 1496 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1497 if (!DRE) 1498 continue; 1499 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1500 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref); 1501 } 1502 } 1503 // Privates should not be analyzed since they are not captured at all. 1504 // Task reductions may be skipped - tasks are ignored. 1505 // Firstprivates do not return a value but may be passed by reference - no 1506 // need to check for updated lastprivate conditional.
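// E.g. (OpenMP 5.0), given
//   #pragma omp parallel for lastprivate(conditional: a)
// every store to 'a' inside the region has to be tracked so that the value
// of the sequentially last iteration that actually assigned 'a' is the one
// copied back to the original variable.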
1507 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1508 for (const Expr *Ref : C->varlists()) { 1509 if (!Ref->getType()->isScalarType()) 1510 continue; 1511 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts()); 1512 if (!DRE) 1513 continue; 1514 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl())); 1515 } 1516 } 1517 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional( 1518 CGF, S, PrivateDecls); 1519 } 1520 1521 static void emitCommonOMPParallelDirective( 1522 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1523 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, 1524 const CodeGenBoundParametersTy &CodeGenBoundParameters) { 1525 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1526 llvm::Function *OutlinedFn = 1527 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 1528 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 1529 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) { 1530 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 1531 llvm::Value *NumThreads = 1532 CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 1533 /*IgnoreResultAssign=*/true); 1534 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 1535 CGF, NumThreads, NumThreadsClause->getBeginLoc()); 1536 } 1537 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) { 1538 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); 1539 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 1540 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc()); 1541 } 1542 const Expr *IfCond = nullptr; 1543 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1544 if (C->getNameModifier() == OMPD_unknown || 1545 C->getNameModifier() == OMPD_parallel) { 1546 IfCond = C->getCondition(); 1547 break; 1548 } 1549 } 1550 1551 OMPParallelScope Scope(CGF, S); 1552 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 1553 // Combining 'distribute' with 'for' requires sharing each 'distribute' 1554 // chunk's lower and upper bounds with the 'for' pragma's chunking mechanism. 1555 // The following lambda takes care of appending the lower and upper bound 1556 // parameters when necessary. 1557 CodeGenBoundParameters(CGF, S, CapturedVars); 1558 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 1559 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn, 1560 CapturedVars, IfCond); 1561 } 1562 1563 static void emitEmptyBoundParameters(CodeGenFunction &, 1564 const OMPExecutableDirective &, 1565 llvm::SmallVectorImpl<llvm::Value *> &) {} 1566 1567 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 1568 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 1569 // Check if we have any if clause associated with the directive.
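// E.g., for '#pragma omp parallel if(N > 1)' the emitted condition lets the
// runtime serialize the region whenever the expression evaluates to false.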
1570 llvm::Value *IfCond = nullptr; 1571 if (const auto *C = S.getSingleClause<OMPIfClause>()) 1572 IfCond = EmitScalarExpr(C->getCondition(), 1573 /*IgnoreResultAssign=*/true); 1574 1575 llvm::Value *NumThreads = nullptr; 1576 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) 1577 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(), 1578 /*IgnoreResultAssign=*/true); 1579 1580 ProcBindKind ProcBind = OMP_PROC_BIND_default; 1581 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) 1582 ProcBind = ProcBindClause->getProcBindKind(); 1583 1584 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 1585 1586 // The cleanup callback that finalizes all variables at the given location, 1587 // and thus calls destructors etc. 1588 auto FiniCB = [this](InsertPointTy IP) { 1589 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 1590 }; 1591 1592 // Privatization callback that performs appropriate action for 1593 // shared/private/firstprivate/lastprivate/copyin/... variables. 1594 // 1595 // TODO: This defaults to shared right now. 1596 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1597 llvm::Value &Val, llvm::Value *&ReplVal) { 1598 // The next line is appropriate only for variables (Val) with the 1599 // data-sharing attribute "shared". 1600 ReplVal = &Val; 1601 1602 return CodeGenIP; 1603 }; 1604 1605 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 1606 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt(); 1607 1608 auto BodyGenCB = [ParallelRegionBodyStmt, 1609 this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, 1610 llvm::BasicBlock &ContinuationBB) { 1611 OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP, 1612 ContinuationBB); 1613 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt, 1614 CodeGenIP, ContinuationBB); 1615 }; 1616 1617 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 1618 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 1619 Builder.restoreIP(OMPBuilder->CreateParallel(Builder, BodyGenCB, PrivCB, 1620 FiniCB, IfCond, NumThreads, 1621 ProcBind, S.hasCancel())); 1622 return; 1623 } 1624 1625 // Emit parallel region as a standalone region. 1626 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 1627 Action.Enter(CGF); 1628 OMPPrivateScope PrivateScope(CGF); 1629 bool Copyins = CGF.EmitOMPCopyinClause(S); 1630 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 1631 if (Copyins) { 1632 // Emit an implicit barrier to synchronize threads and avoid data races on 1633 // propagation of the master thread's values of threadprivate variables to 1634 // the local instances of those variables in all other implicit threads. 1635 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 1636 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 1637 /*ForceSimpleCall=*/true); 1638 } 1639 CGF.EmitOMPPrivateClause(S, PrivateScope); 1640 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 1641 (void)PrivateScope.Privatize(); 1642 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt()); 1643 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 1644 }; 1645 { 1646 auto LPCRegion = 1647 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 1648 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen, 1649 emitEmptyBoundParameters); 1650 emitPostUpdateForReductionClause(*this, S, 1651 [](CodeGenFunction &) { return nullptr; }); 1652 } 1653 // Check for outer lastprivate conditional update.
1654 checkForLastprivateConditionalUpdate(*this, S); 1655 } 1656 1657 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, 1658 int MaxLevel, int Level = 0) { 1659 assert(Level < MaxLevel && "Too deep lookup during loop body codegen."); 1660 const Stmt *SimplifiedS = S->IgnoreContainers(); 1661 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) { 1662 PrettyStackTraceLoc CrashInfo( 1663 CGF.getContext().getSourceManager(), CS->getLBracLoc(), 1664 "LLVM IR generation of compound statement ('{}')"); 1665 1666 // Keep track of the current cleanup stack depth, including debug scopes. 1667 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange()); 1668 for (const Stmt *CurStmt : CS->body()) 1669 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level); 1670 return; 1671 } 1672 if (SimplifiedS == NextLoop) { 1673 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) { 1674 S = For->getBody(); 1675 } else { 1676 assert(isa<CXXForRangeStmt>(SimplifiedS) && 1677 "Expected canonical for loop or range-based for loop."); 1678 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS); 1679 CGF.EmitStmt(CXXFor->getLoopVarStmt()); 1680 S = CXXFor->getBody(); 1681 } 1682 if (Level + 1 < MaxLevel) { 1683 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop( 1684 S, /*TryImperfectlyNestedLoops=*/true); 1685 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1); 1686 return; 1687 } 1688 } 1689 CGF.EmitStmt(S); 1690 } 1691 1692 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 1693 JumpDest LoopExit) { 1694 RunCleanupsScope BodyScope(*this); 1695 // Update counter values on the current iteration. 1696 for (const Expr *UE : D.updates()) 1697 EmitIgnoredExpr(UE); 1698 // Update the linear variables. 1699 // In distribute directives only loop counters may be marked as linear; no 1700 // need to generate the code for them. 1701 if (!isOpenMPDistributeDirective(D.getDirectiveKind())) { 1702 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1703 for (const Expr *UE : C->updates()) 1704 EmitIgnoredExpr(UE); 1705 } 1706 } 1707 1708 // On a continue in the body, jump to the end. 1709 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue"); 1710 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1711 for (const Expr *E : D.finals_conditions()) { 1712 if (!E) 1713 continue; 1714 // Check that the loop counter in a non-rectangular nest fits into the 1715 // iteration space. 1716 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next"); 1717 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(), 1718 getProfileCount(D.getBody())); 1719 EmitBlock(NextBB); 1720 } 1721 1722 OMPPrivateScope InscanScope(*this); 1723 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true); 1724 bool IsInscanRegion = InscanScope.Privatize(); 1725 if (IsInscanRegion) { 1726 // Need to remember the blocks before and after the scan directive 1727 // to dispatch them correctly depending on the clause used in 1728 // this directive, inclusive or exclusive. For an inclusive scan the natural 1729 // order of the blocks is used; for an exclusive clause the blocks must be 1730 // executed in reverse order. 1731 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb"); 1732 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb"); 1733 // No need to allocate an inscan exit block; in simd mode it is selected in 1734 // the codegen for the scan directive.
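// Sketch of the resulting block layout (inclusive scan shown; for an
// exclusive scan the two body blocks are dispatched in the reverse order):
//   omp.inscan.dispatch -> omp.before.scan.bb -> ... -> omp.after.scan.bb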
1735 if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd) 1736 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb"); 1737 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch"); 1738 EmitBranch(OMPScanDispatch); 1739 EmitBlock(OMPBeforeScanBlock); 1740 } 1741 1742 // Emit loop variables for C++ range loops. 1743 const Stmt *Body = 1744 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); 1745 // Emit loop body. 1746 emitBody(*this, Body, 1747 OMPLoopDirective::tryToFindNextInnerLoop( 1748 Body, /*TryImperfectlyNestedLoops=*/true), 1749 D.getCollapsedNumber()); 1750 1751 // Jump to the dispatcher at the end of the loop body. 1752 if (IsInscanRegion) 1753 EmitBranch(OMPScanExitBlock); 1754 1755 // The end (updates/cleanups). 1756 EmitBlock(Continue.getBlock()); 1757 BreakContinueStack.pop_back(); 1758 } 1759 1760 void CodeGenFunction::EmitOMPInnerLoop( 1761 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond, 1762 const Expr *IncExpr, 1763 const llvm::function_ref<void(CodeGenFunction &)> BodyGen, 1764 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) { 1765 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 1766 1767 // Start the loop with a block that tests the condition. 1768 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 1769 EmitBlock(CondBlock); 1770 const SourceRange R = S.getSourceRange(); 1771 1772 // If attributes are attached, push the basic block together with them. 1773 const auto &OMPED = cast<OMPExecutableDirective>(S); 1774 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt(); 1775 const Stmt *SS = ICS->getCapturedStmt(); 1776 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS); 1777 if (AS) 1778 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), 1779 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()), 1780 SourceLocToDebugLoc(R.getEnd())); 1781 else 1782 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1783 SourceLocToDebugLoc(R.getEnd())); 1784 1785 // If there are any cleanups between here and the loop-exit scope, 1786 // create a block to stage a loop exit along. 1787 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 1788 if (RequiresCleanup) 1789 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 1790 1791 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body"); 1792 1793 // Emit condition. 1794 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 1795 if (ExitBlock != LoopExit.getBlock()) { 1796 EmitBlock(ExitBlock); 1797 EmitBranchThroughCleanup(LoopExit); 1798 } 1799 1800 EmitBlock(LoopBody); 1801 incrementProfileCounter(&S); 1802 1803 // Create a block for the increment. 1804 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 1805 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1806 1807 BodyGen(*this); 1808 1809 // Emit "IV = IV + 1" and a back-edge to the condition block. 1810 EmitBlock(Continue.getBlock()); 1811 EmitIgnoredExpr(IncExpr); 1812 PostIncGen(*this); 1813 BreakContinueStack.pop_back(); 1814 EmitBranch(CondBlock); 1815 LoopStack.pop(); 1816 // Emit the fall-through block. 1817 EmitBlock(LoopExit.getBlock()); 1818 } 1819 1820 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 1821 if (!HaveInsertPoint()) 1822 return false; 1823 // Emit inits for the linear variables.
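// E.g., for '#pragma omp simd linear(x : s)' this emits the initialization
// of the private copy of 'x' and, when the step 's' is not a compile-time
// constant, a helper variable holding the pre-computed step.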
1824 bool HasLinears = false; 1825 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1826 for (const Expr *Init : C->inits()) { 1827 HasLinears = true; 1828 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 1829 if (const auto *Ref = 1830 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) { 1831 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 1832 const auto *OrigVD = cast<VarDecl>(Ref->getDecl()); 1833 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1834 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1835 VD->getInit()->getType(), VK_LValue, 1836 VD->getInit()->getExprLoc()); 1837 EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(), 1838 VD->getType()), 1839 /*capturedByInit=*/false); 1840 EmitAutoVarCleanups(Emission); 1841 } else { 1842 EmitVarDecl(*VD); 1843 } 1844 } 1845 // Emit the linear steps for the linear clauses. 1846 // If a step is not constant, it is pre-calculated before the loop. 1847 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 1848 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 1849 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 1850 // Emit calculation of the linear step. 1851 EmitIgnoredExpr(CS); 1852 } 1853 } 1854 return HasLinears; 1855 } 1856 1857 void CodeGenFunction::EmitOMPLinearClauseFinal( 1858 const OMPLoopDirective &D, 1859 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 1860 if (!HaveInsertPoint()) 1861 return; 1862 llvm::BasicBlock *DoneBB = nullptr; 1863 // Emit the final values of the linear variables. 1864 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 1865 auto IC = C->varlist_begin(); 1866 for (const Expr *F : C->finals()) { 1867 if (!DoneBB) { 1868 if (llvm::Value *Cond = CondGen(*this)) { 1869 // If the first post-update expression is found, emit conditional 1870 // block if it was requested. 
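// Roughly:
//   br i1 %cond, label %.omp.linear.pu, label %.omp.linear.pu.done
// .omp.linear.pu:
//   <final value updates>
// .omp.linear.pu.done: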
1871 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu"); 1872 DoneBB = createBasicBlock(".omp.linear.pu.done"); 1873 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1874 EmitBlock(ThenBB); 1875 } 1876 } 1877 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 1878 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), 1879 CapturedStmtInfo->lookup(OrigVD) != nullptr, 1880 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 1881 Address OrigAddr = EmitLValue(&DRE).getAddress(*this); 1882 CodeGenFunction::OMPPrivateScope VarScope(*this); 1883 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 1884 (void)VarScope.Privatize(); 1885 EmitIgnoredExpr(F); 1886 ++IC; 1887 } 1888 if (const Expr *PostUpdate = C->getPostUpdateExpr()) 1889 EmitIgnoredExpr(PostUpdate); 1890 } 1891 if (DoneBB) 1892 EmitBlock(DoneBB, /*IsFinished=*/true); 1893 } 1894 1895 static void emitAlignedClause(CodeGenFunction &CGF, 1896 const OMPExecutableDirective &D) { 1897 if (!CGF.HaveInsertPoint()) 1898 return; 1899 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) { 1900 llvm::APInt ClauseAlignment(64, 0); 1901 if (const Expr *AlignmentExpr = Clause->getAlignment()) { 1902 auto *AlignmentCI = 1903 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 1904 ClauseAlignment = AlignmentCI->getValue(); 1905 } 1906 for (const Expr *E : Clause->varlists()) { 1907 llvm::APInt Alignment(ClauseAlignment); 1908 if (Alignment == 0) { 1909 // OpenMP [2.8.1, Description] 1910 // If no optional parameter is specified, implementation-defined default 1911 // alignments for SIMD instructions on the target platforms are assumed. 1912 Alignment = 1913 CGF.getContext() 1914 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 1915 E->getType()->getPointeeType())) 1916 .getQuantity(); 1917 } 1918 assert((Alignment == 0 || Alignment.isPowerOf2()) && 1919 "alignment is not power of 2"); 1920 if (Alignment != 0) { 1921 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 1922 CGF.emitAlignmentAssumption( 1923 PtrValue, E, /*No second loc needed*/ SourceLocation(), 1924 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment)); 1925 } 1926 } 1927 } 1928 } 1929 1930 void CodeGenFunction::EmitOMPPrivateLoopCounters( 1931 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) { 1932 if (!HaveInsertPoint()) 1933 return; 1934 auto I = S.private_counters().begin(); 1935 for (const Expr *E : S.counters()) { 1936 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1937 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 1938 // Emit var without initialization. 1939 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD); 1940 EmitAutoVarCleanups(VarEmission); 1941 LocalDeclMap.erase(PrivateVD); 1942 (void)LoopScope.addPrivate(VD, [&VarEmission]() { 1943 return VarEmission.getAllocatedAddress(); 1944 }); 1945 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) || 1946 VD->hasGlobalStorage()) { 1947 (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() { 1948 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), 1949 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD), 1950 E->getType(), VK_LValue, E->getExprLoc()); 1951 return EmitLValue(&DRE).getAddress(*this); 1952 }); 1953 } else { 1954 (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() { 1955 return VarEmission.getAllocatedAddress(); 1956 }); 1957 } 1958 ++I; 1959 } 1960 // Privatize extra loop counters used in loops for ordered(n) clauses. 
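// E.g., for '#pragma omp for ordered(2)' with no collapse clause only the
// outermost loop is collapsed, so the counter of the second loop in the
// nest must still be privatized here.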
1961 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) { 1962 if (!C->getNumForLoops()) 1963 continue; 1964 for (unsigned I = S.getCollapsedNumber(), 1965 E = C->getLoopNumIterations().size(); 1966 I < E; ++I) { 1967 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I)); 1968 const auto *VD = cast<VarDecl>(DRE->getDecl()); 1969 // Override only those variables that can be captured to avoid re-emission 1970 // of the variables declared within the loops. 1971 if (DRE->refersToEnclosingVariableOrCapture()) { 1972 (void)LoopScope.addPrivate(VD, [this, DRE, VD]() { 1973 return CreateMemTemp(DRE->getType(), VD->getName()); 1974 }); 1975 } 1976 } 1977 } 1978 } 1979 1980 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 1981 const Expr *Cond, llvm::BasicBlock *TrueBlock, 1982 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 1983 if (!CGF.HaveInsertPoint()) 1984 return; 1985 { 1986 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 1987 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope); 1988 (void)PreCondScope.Privatize(); 1989 // Get initial values of real counters. 1990 for (const Expr *I : S.inits()) { 1991 CGF.EmitIgnoredExpr(I); 1992 } 1993 } 1994 // Create temp loop control variables with their init values to support 1995 // non-rectangular loops. 1996 CodeGenFunction::OMPMapVars PreCondVars; 1997 for (const Expr *E : S.dependent_counters()) { 1998 if (!E) 1999 continue; 2000 assert(!E->getType().getNonReferenceType()->isRecordType() && 2001 "dependent counter must not be an iterator."); 2002 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2003 Address CounterAddr = 2004 CGF.CreateMemTemp(VD->getType().getNonReferenceType()); 2005 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr); 2006 } 2007 (void)PreCondVars.apply(CGF); 2008 for (const Expr *E : S.dependent_inits()) { 2009 if (!E) 2010 continue; 2011 CGF.EmitIgnoredExpr(E); 2012 } 2013 // Check that the loop is executed at least once. 2014 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 2015 PreCondVars.restore(CGF); 2016 } 2017 2018 void CodeGenFunction::EmitOMPLinearClause( 2019 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { 2020 if (!HaveInsertPoint()) 2021 return; 2022 llvm::DenseSet<const VarDecl *> SIMDLCVs; 2023 if (isOpenMPSimdDirective(D.getDirectiveKind())) { 2024 const auto *LoopDirective = cast<OMPLoopDirective>(&D); 2025 for (const Expr *C : LoopDirective->counters()) { 2026 SIMDLCVs.insert( 2027 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl()); 2028 } 2029 } 2030 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) { 2031 auto CurPrivate = C->privates().begin(); 2032 for (const Expr *E : C->varlists()) { 2033 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2034 const auto *PrivateVD = 2035 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl()); 2036 if (!SIMDLCVs.count(VD->getCanonicalDecl())) { 2037 bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() { 2038 // Emit private VarDecl with copy init. 2039 EmitVarDecl(*PrivateVD); 2040 return GetAddrOfLocalVar(PrivateVD); 2041 }); 2042 assert(IsRegistered && "linear var already registered as private"); 2043 // Silence the warning about the unused variable.
2044 (void)IsRegistered; 2045 } else { 2046 EmitVarDecl(*PrivateVD); 2047 } 2048 ++CurPrivate; 2049 } 2050 } 2051 } 2052 2053 static void emitSimdlenSafelenClause(CodeGenFunction &CGF, 2054 const OMPExecutableDirective &D, 2055 bool IsMonotonic) { 2056 if (!CGF.HaveInsertPoint()) 2057 return; 2058 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) { 2059 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(), 2060 /*ignoreResult=*/true); 2061 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2062 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2063 // In the presence of a finite 'safelen', it may be unsafe to mark all 2064 // the memory instructions parallel, because loop-carried 2065 // dependences of 'safelen' iterations are possible. 2066 if (!IsMonotonic) 2067 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>()); 2068 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) { 2069 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 2070 /*ignoreResult=*/true); 2071 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 2072 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 2073 // In the presence of a finite 'safelen', it may be unsafe to mark all 2074 // the memory instructions parallel, because loop-carried 2075 // dependences of 'safelen' iterations are possible. 2076 CGF.LoopStack.setParallel(/*Enable=*/false); 2077 } 2078 } 2079 2080 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D, 2081 bool IsMonotonic) { 2082 // Walk the clauses and process simdlen/safelen and related clauses. 2083 LoopStack.setParallel(!IsMonotonic); 2084 LoopStack.setVectorizeEnable(); 2085 emitSimdlenSafelenClause(*this, D, IsMonotonic); 2086 if (const auto *C = D.getSingleClause<OMPOrderClause>()) 2087 if (C->getKind() == OMPC_ORDER_concurrent) 2088 LoopStack.setParallel(/*Enable=*/true); 2089 if ((D.getDirectiveKind() == OMPD_simd || 2090 (getLangOpts().OpenMPSimd && 2091 isOpenMPSimdDirective(D.getDirectiveKind()))) && 2092 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(), 2093 [](const OMPReductionClause *C) { 2094 return C->getModifier() == OMPC_REDUCTION_inscan; 2095 })) 2096 // Disable parallel access in case of prefix sum. 2097 LoopStack.setParallel(/*Enable=*/false); 2098 } 2099 2100 void CodeGenFunction::EmitOMPSimdFinal( 2101 const OMPLoopDirective &D, 2102 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) { 2103 if (!HaveInsertPoint()) 2104 return; 2105 llvm::BasicBlock *DoneBB = nullptr; 2106 auto IC = D.counters().begin(); 2107 auto IPC = D.private_counters().begin(); 2108 for (const Expr *F : D.finals()) { 2109 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 2110 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl()); 2111 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD); 2112 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) || 2113 OrigVD->hasGlobalStorage() || CED) { 2114 if (!DoneBB) { 2115 if (llvm::Value *Cond = CondGen(*this)) { 2116 // If the first post-update expression is found, emit conditional 2117 // block if it was requested.
2118 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then"); 2119 DoneBB = createBasicBlock(".omp.final.done"); 2120 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 2121 EmitBlock(ThenBB); 2122 } 2123 } 2124 Address OrigAddr = Address::invalid(); 2125 if (CED) { 2126 OrigAddr = 2127 EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this); 2128 } else { 2129 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD), 2130 /*RefersToEnclosingVariableOrCapture=*/false, 2131 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 2132 OrigAddr = EmitLValue(&DRE).getAddress(*this); 2133 } 2134 OMPPrivateScope VarScope(*this); 2135 VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; }); 2136 (void)VarScope.Privatize(); 2137 EmitIgnoredExpr(F); 2138 } 2139 ++IC; 2140 ++IPC; 2141 } 2142 if (DoneBB) 2143 EmitBlock(DoneBB, /*IsFinished=*/true); 2144 } 2145 2146 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 2147 const OMPLoopDirective &S, 2148 CodeGenFunction::JumpDest LoopExit) { 2149 CGF.EmitOMPLoopBody(S, LoopExit); 2150 CGF.EmitStopPoint(&S); 2151 } 2152 2153 /// Emit a helper variable and return corresponding lvalue. 2154 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 2155 const DeclRefExpr *Helper) { 2156 auto VDecl = cast<VarDecl>(Helper->getDecl()); 2157 CGF.EmitVarDecl(*VDecl); 2158 return CGF.EmitLValue(Helper); 2159 } 2160 2161 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S, 2162 const RegionCodeGenTy &SimdInitGen, 2163 const RegionCodeGenTy &BodyCodeGen) { 2164 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF, 2165 PrePostActionTy &) { 2166 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S); 2167 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2168 SimdInitGen(CGF); 2169 2170 BodyCodeGen(CGF); 2171 }; 2172 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 2173 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 2174 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false); 2175 2176 BodyCodeGen(CGF); 2177 }; 2178 const Expr *IfCond = nullptr; 2179 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2180 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2181 if (CGF.getLangOpts().OpenMP >= 50 && 2182 (C->getNameModifier() == OMPD_unknown || 2183 C->getNameModifier() == OMPD_simd)) { 2184 IfCond = C->getCondition(); 2185 break; 2186 } 2187 } 2188 } 2189 if (IfCond) { 2190 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen); 2191 } else { 2192 RegionCodeGenTy ThenRCG(ThenGen); 2193 ThenRCG(CGF); 2194 } 2195 } 2196 2197 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S, 2198 PrePostActionTy &Action) { 2199 Action.Enter(CGF); 2200 assert(isOpenMPSimdDirective(S.getDirectiveKind()) && 2201 "Expected simd directive"); 2202 OMPLoopScope PreInitScope(CGF, S); 2203 // if (PreCond) { 2204 // for (IV in 0..LastIteration) BODY; 2205 // <Final counter/linear vars updates>; 2206 // } 2207 // 2208 if (isOpenMPDistributeDirective(S.getDirectiveKind()) || 2209 isOpenMPWorksharingDirective(S.getDirectiveKind()) || 2210 isOpenMPTaskLoopDirective(S.getDirectiveKind())) { 2211 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable())); 2212 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable())); 2213 } 2214 2215 // Emit: if (PreCond) - begin. 2216 // If the condition constant folds and can be elided, avoid emitting the 2217 // whole loop. 
2218 bool CondConstant; 2219 llvm::BasicBlock *ContBlock = nullptr; 2220 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2221 if (!CondConstant) 2222 return; 2223 } else { 2224 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then"); 2225 ContBlock = CGF.createBasicBlock("simd.if.end"); 2226 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 2227 CGF.getProfileCount(&S)); 2228 CGF.EmitBlock(ThenBlock); 2229 CGF.incrementProfileCounter(&S); 2230 } 2231 2232 // Emit the loop iteration variable. 2233 const Expr *IVExpr = S.getIterationVariable(); 2234 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 2235 CGF.EmitVarDecl(*IVDecl); 2236 CGF.EmitIgnoredExpr(S.getInit()); 2237 2238 // Emit the iterations count variable. 2239 // If it is not a variable, Sema decided to calculate iterations count on 2240 // each iteration (e.g., it is foldable into a constant). 2241 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2242 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2243 // Emit calculation of the iterations count. 2244 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 2245 } 2246 2247 emitAlignedClause(CGF, S); 2248 (void)CGF.EmitOMPLinearClauseInit(S); 2249 { 2250 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2251 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 2252 CGF.EmitOMPLinearClause(S, LoopScope); 2253 CGF.EmitOMPPrivateClause(S, LoopScope); 2254 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2255 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2256 CGF, S, CGF.EmitLValue(S.getIterationVariable())); 2257 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2258 (void)LoopScope.Privatize(); 2259 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2260 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 2261 2262 emitCommonSimdLoop( 2263 CGF, S, 2264 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2265 CGF.EmitOMPSimdInit(S); 2266 }, 2267 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2268 CGF.EmitOMPInnerLoop( 2269 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 2270 [&S](CodeGenFunction &CGF) { 2271 emitOMPLoopBodyWithStopPoint(CGF, S, 2272 CodeGenFunction::JumpDest()); 2273 }, 2274 [](CodeGenFunction &) {}); 2275 }); 2276 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; }); 2277 // Emit final copy of the lastprivate variables at the end of loops. 2278 if (HasLastprivateClause) 2279 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 2280 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 2281 emitPostUpdateForReductionClause(CGF, S, 2282 [](CodeGenFunction &) { return nullptr; }); 2283 } 2284 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; }); 2285 // Emit: if (PreCond) - end. 2286 if (ContBlock) { 2287 CGF.EmitBranch(ContBlock); 2288 CGF.EmitBlock(ContBlock, true); 2289 } 2290 } 2291 2292 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 2293 ParentLoopDirectiveForScanRegion ScanRegion(*this, S); 2294 OMPFirstScanLoop = true; 2295 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2296 emitOMPSimdRegion(CGF, S, Action); 2297 }; 2298 { 2299 auto LPCRegion = 2300 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 2301 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2302 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2303 } 2304 // Check for outer lastprivate conditional update. 
2305 checkForLastprivateConditionalUpdate(*this, S); 2306 } 2307 2308 void CodeGenFunction::EmitOMPOuterLoop( 2309 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 2310 CodeGenFunction::OMPPrivateScope &LoopScope, 2311 const CodeGenFunction::OMPLoopArguments &LoopArgs, 2312 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 2313 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 2314 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2315 2316 const Expr *IVExpr = S.getIterationVariable(); 2317 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2318 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2319 2320 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 2321 2322 // Start the loop with a block that tests the condition. 2323 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond"); 2324 EmitBlock(CondBlock); 2325 const SourceRange R = S.getSourceRange(); 2326 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 2327 SourceLocToDebugLoc(R.getEnd())); 2328 2329 llvm::Value *BoolCondVal = nullptr; 2330 if (!DynamicOrOrdered) { 2331 // UB = min(UB, GlobalUB) or 2332 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 2333 // 'distribute parallel for') 2334 EmitIgnoredExpr(LoopArgs.EUB); 2335 // IV = LB 2336 EmitIgnoredExpr(LoopArgs.Init); 2337 // IV < UB 2338 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 2339 } else { 2340 BoolCondVal = 2341 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL, 2342 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 2343 } 2344 2345 // If there are any cleanups between here and the loop-exit scope, 2346 // create a block to stage a loop exit along. 2347 llvm::BasicBlock *ExitBlock = LoopExit.getBlock(); 2348 if (LoopScope.requiresCleanups()) 2349 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 2350 2351 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body"); 2352 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 2353 if (ExitBlock != LoopExit.getBlock()) { 2354 EmitBlock(ExitBlock); 2355 EmitBranchThroughCleanup(LoopExit); 2356 } 2357 EmitBlock(LoopBody); 2358 2359 // Emit "IV = LB" (in case of static schedule, we have already calculated new 2360 // LB for loop condition and emitted it above). 2361 if (DynamicOrOrdered) 2362 EmitIgnoredExpr(LoopArgs.Init); 2363 2364 // Create a block for the increment. 2365 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 2366 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 2367 2368 emitCommonSimdLoop( 2369 *this, S, 2370 [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) { 2371 // Generate !llvm.loop.parallel metadata for loads and stores for loops 2372 // with dynamic/guided scheduling and without ordered clause. 2373 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2374 CGF.LoopStack.setParallel(!IsMonotonic); 2375 if (const auto *C = S.getSingleClause<OMPOrderClause>()) 2376 if (C->getKind() == OMPC_ORDER_concurrent) 2377 CGF.LoopStack.setParallel(/*Enable=*/true); 2378 } else { 2379 CGF.EmitOMPSimdInit(S, IsMonotonic); 2380 } 2381 }, 2382 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered, 2383 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 2384 SourceLocation Loc = S.getBeginLoc(); 2385 // when 'distribute' is not combined with a 'for': 2386 // while (idx <= UB) { BODY; ++idx; } 2387 // when 'distribute' is combined with a 'for' 2388 // (e.g. 
'distribute parallel for') 2389 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 2390 CGF.EmitOMPInnerLoop( 2391 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 2392 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 2393 CodeGenLoop(CGF, S, LoopExit); 2394 }, 2395 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 2396 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 2397 }); 2398 }); 2399 2400 EmitBlock(Continue.getBlock()); 2401 BreakContinueStack.pop_back(); 2402 if (!DynamicOrOrdered) { 2403 // Emit "LB = LB + Stride", "UB = UB + Stride". 2404 EmitIgnoredExpr(LoopArgs.NextLB); 2405 EmitIgnoredExpr(LoopArgs.NextUB); 2406 } 2407 2408 EmitBranch(CondBlock); 2409 LoopStack.pop(); 2410 // Emit the fall-through block. 2411 EmitBlock(LoopExit.getBlock()); 2412 2413 // Tell the runtime we are done. 2414 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 2415 if (!DynamicOrOrdered) 2416 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2417 S.getDirectiveKind()); 2418 }; 2419 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2420 } 2421 2422 void CodeGenFunction::EmitOMPForOuterLoop( 2423 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 2424 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 2425 const OMPLoopArguments &LoopArgs, 2426 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2427 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2428 2429 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 2430 const bool DynamicOrOrdered = 2431 Ordered || RT.isDynamic(ScheduleKind.Schedule); 2432 2433 assert((Ordered || 2434 !RT.isStaticNonchunked(ScheduleKind.Schedule, 2435 LoopArgs.Chunk != nullptr)) && 2436 "static non-chunked schedule does not need outer loop"); 2437 2438 // Emit outer loop. 2439 // 2440 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2441 // When schedule(dynamic,chunk_size) is specified, the iterations are 2442 // distributed to threads in the team in chunks as the threads request them. 2443 // Each thread executes a chunk of iterations, then requests another chunk, 2444 // until no chunks remain to be distributed. Each chunk contains chunk_size 2445 // iterations, except for the last chunk to be distributed, which may have 2446 // fewer iterations. When no chunk_size is specified, it defaults to 1. 2447 // 2448 // When schedule(guided,chunk_size) is specified, the iterations are assigned 2449 // to threads in the team in chunks as the executing threads request them. 2450 // Each thread executes a chunk of iterations, then requests another chunk, 2451 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 2452 // each chunk is proportional to the number of unassigned iterations divided 2453 // by the number of threads in the team, decreasing to 1. For a chunk_size 2454 // with value k (greater than 1), the size of each chunk is determined in the 2455 // same way, with the restriction that the chunks do not contain fewer than k 2456 // iterations (except for the last chunk to be assigned, which may have fewer 2457 // than k iterations). 2458 // 2459 // When schedule(auto) is specified, the decision regarding scheduling is 2460 // delegated to the compiler and/or runtime system. The programmer gives the 2461 // implementation the freedom to choose any possible mapping of iterations to 2462 // threads in the team. 
2463 // 2464 // When schedule(runtime) is specified, the decision regarding scheduling is 2465 // deferred until run time, and the schedule and chunk size are taken from the 2466 // run-sched-var ICV. If the ICV is set to auto, the schedule is 2467 // implementation defined. 2468 // 2469 // while(__kmpc_dispatch_next(&LB, &UB)) { 2470 // idx = LB; 2471 // while (idx <= UB) { BODY; ++idx; 2472 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 2473 // } // inner loop 2474 // } 2475 // 2476 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2477 // When schedule(static, chunk_size) is specified, iterations are divided into 2478 // chunks of size chunk_size, and the chunks are assigned to the threads in 2479 // the team in a round-robin fashion in the order of the thread number. 2480 // 2481 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 2482 // while (idx <= UB) { BODY; ++idx; } // inner loop 2483 // LB = LB + ST; 2484 // UB = UB + ST; 2485 // } 2486 // 2487 2488 const Expr *IVExpr = S.getIterationVariable(); 2489 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2490 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2491 2492 if (DynamicOrOrdered) { 2493 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds = 2494 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 2495 llvm::Value *LBVal = DispatchBounds.first; 2496 llvm::Value *UBVal = DispatchBounds.second; 2497 CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal, 2498 LoopArgs.Chunk}; 2499 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize, 2500 IVSigned, Ordered, DispatchRTInputValues); 2501 } else { 2502 CGOpenMPRuntime::StaticRTInput StaticInit( 2503 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 2504 LoopArgs.ST, LoopArgs.Chunk); 2505 RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(), 2506 ScheduleKind, StaticInit); 2507 } 2508 2509 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 2510 const unsigned IVSize, 2511 const bool IVSigned) { 2512 if (Ordered) { 2513 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 2514 IVSigned); 2515 } 2516 }; 2517 2518 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 2519 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 2520 OuterLoopArgs.IncExpr = S.getInc(); 2521 OuterLoopArgs.Init = S.getInit(); 2522 OuterLoopArgs.Cond = S.getCond(); 2523 OuterLoopArgs.NextLB = S.getNextLowerBound(); 2524 OuterLoopArgs.NextUB = S.getNextUpperBound(); 2525 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 2526 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 2527 } 2528 2529 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, 2530 const unsigned IVSize, const bool IVSigned) {} 2531 2532 void CodeGenFunction::EmitOMPDistributeOuterLoop( 2533 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 2534 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 2535 const CodeGenLoopTy &CodeGenLoopContent) { 2536 2537 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2538 2539 // Emit outer loop.
2540 // Same behavior as an OMPForOuterLoop, except that the schedule cannot be 2541 // dynamic. 2542 // 2543 2544 const Expr *IVExpr = S.getIterationVariable(); 2545 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2546 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2547 2548 CGOpenMPRuntime::StaticRTInput StaticInit( 2549 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB, 2550 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk); 2551 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit); 2552 2553 // For combined 'distribute' and 'for' the increment expression of distribute 2554 // is stored in DistInc. For 'distribute' alone, it is in Inc. 2555 Expr *IncExpr; 2556 if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())) 2557 IncExpr = S.getDistInc(); 2558 else 2559 IncExpr = S.getInc(); 2560 2561 // This routine is shared by 'omp distribute parallel for' and 2562 // 'omp distribute': select the right EUB expression depending on the 2563 // directive. 2564 OMPLoopArguments OuterLoopArgs; 2565 OuterLoopArgs.LB = LoopArgs.LB; 2566 OuterLoopArgs.UB = LoopArgs.UB; 2567 OuterLoopArgs.ST = LoopArgs.ST; 2568 OuterLoopArgs.IL = LoopArgs.IL; 2569 OuterLoopArgs.Chunk = LoopArgs.Chunk; 2570 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2571 ? S.getCombinedEnsureUpperBound() 2572 : S.getEnsureUpperBound(); 2573 OuterLoopArgs.IncExpr = IncExpr; 2574 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2575 ? S.getCombinedInit() 2576 : S.getInit(); 2577 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2578 ? S.getCombinedCond() 2579 : S.getCond(); 2580 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2581 ? S.getCombinedNextLowerBound() 2582 : S.getNextLowerBound(); 2583 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 2584 ? S.getCombinedNextUpperBound() 2585 : S.getNextUpperBound(); 2586 2587 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S, 2588 LoopScope, OuterLoopArgs, CodeGenLoopContent, 2589 emitEmptyOrdered); 2590 } 2591 2592 static std::pair<LValue, LValue> 2593 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF, 2594 const OMPExecutableDirective &S) { 2595 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2596 LValue LB = 2597 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2598 LValue UB = 2599 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2600 2601 // When composing 'distribute' with 'for' (e.g. as in 'distribute 2602 // parallel for') we need to use the 'distribute' 2603 // chunk lower and upper bounds rather than the whole loop iteration 2604 // space. These are parameters to the outlined function for 'parallel' 2605 // and we copy the bounds of the previous schedule into 2606 // the current ones.
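// Roughly:
//   LB = PrevLB; UB = PrevUB; // adopt the enclosing 'distribute' chunk
// so that the inner 'for' only iterates over its team's assigned chunk.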
2607 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); 2608 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); 2609 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar( 2610 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc()); 2611 PrevLBVal = CGF.EmitScalarConversion( 2612 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), 2613 LS.getIterationVariable()->getType(), 2614 LS.getPrevLowerBoundVariable()->getExprLoc()); 2615 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar( 2616 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc()); 2617 PrevUBVal = CGF.EmitScalarConversion( 2618 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), 2619 LS.getIterationVariable()->getType(), 2620 LS.getPrevUpperBoundVariable()->getExprLoc()); 2621 2622 CGF.EmitStoreOfScalar(PrevLBVal, LB); 2623 CGF.EmitStoreOfScalar(PrevUBVal, UB); 2624 2625 return {LB, UB}; 2626 } 2627 2628 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), then 2629 /// we need to use the LB and UB expressions generated by the worksharing 2630 /// code generation support, whereas in non-combined situations we would 2631 /// just emit 0 and the LastIteration expression. 2632 /// This function is necessary due to the difference in the LB and UB 2633 /// types for the runtime emission routines 'for_static_init' and 2634 /// 'for_dispatch_init'. 2635 static std::pair<llvm::Value *, llvm::Value *> 2636 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, 2637 const OMPExecutableDirective &S, 2638 Address LB, Address UB) { 2639 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2640 const Expr *IVExpr = LS.getIterationVariable(); 2641 // When implementing a dynamic schedule for a 'for' combined with a 2642 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop 2643 // is not normalized, as each team only executes its own assigned 2644 // distribute chunk. 2645 QualType IteratorTy = IVExpr->getType(); 2646 llvm::Value *LBVal = 2647 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2648 llvm::Value *UBVal = 2649 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc()); 2650 return {LBVal, UBVal}; 2651 } 2652 2653 static void emitDistributeParallelForDistributeInnerBoundParams( 2654 CodeGenFunction &CGF, const OMPExecutableDirective &S, 2655 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) { 2656 const auto &Dir = cast<OMPLoopDirective>(S); 2657 LValue LB = 2658 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable())); 2659 llvm::Value *LBCast = 2660 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)), 2661 CGF.SizeTy, /*isSigned=*/false); 2662 CapturedVars.push_back(LBCast); 2663 LValue UB = 2664 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable())); 2665 2666 llvm::Value *UBCast = 2667 CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)), 2668 CGF.SizeTy, /*isSigned=*/false); 2669 CapturedVars.push_back(UBCast); 2670 } 2671 2672 static void 2673 emitInnerParallelForWhenCombined(CodeGenFunction &CGF, 2674 const OMPLoopDirective &S, 2675 CodeGenFunction::JumpDest LoopExit) { 2676 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, 2677 PrePostActionTy &Action) { 2678 Action.Enter(CGF); 2679 bool HasCancel = false; 2680 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 2681 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S)) 2682 HasCancel = D->hasCancel(); 2683 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S)) 2684 HasCancel =
D->hasCancel(); 2685 else if (const auto *D = 2686 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S)) 2687 HasCancel = D->hasCancel(); 2688 } 2689 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 2690 HasCancel); 2691 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 2692 emitDistributeParallelForInnerBounds, 2693 emitDistributeParallelForDispatchBounds); 2694 }; 2695 2696 emitCommonOMPParallelDirective( 2697 CGF, S, 2698 isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for, 2699 CGInlinedWorksharingLoop, 2700 emitDistributeParallelForDistributeInnerBoundParams); 2701 } 2702 2703 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 2704 const OMPDistributeParallelForDirective &S) { 2705 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2706 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2707 S.getDistInc()); 2708 }; 2709 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2710 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2711 } 2712 2713 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 2714 const OMPDistributeParallelForSimdDirective &S) { 2715 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2716 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 2717 S.getDistInc()); 2718 }; 2719 OMPLexicalScope Scope(*this, S, OMPD_parallel); 2720 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 2721 } 2722 2723 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2724 const OMPDistributeSimdDirective &S) { 2725 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2726 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 2727 }; 2728 OMPLexicalScope Scope(*this, S, OMPD_unknown); 2729 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2730 } 2731 2732 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 2733 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) { 2734 // Emit SPMD target simd region as a standalone region. 2735 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2736 emitOMPSimdRegion(CGF, S, Action); 2737 }; 2738 llvm::Function *Fn; 2739 llvm::Constant *Addr; 2740 // Emit target region as a standalone region. 2741 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 2742 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 2743 assert(Fn && Addr && "Target device function emission failed."); 2744 } 2745 2746 void CodeGenFunction::EmitOMPTargetSimdDirective( 2747 const OMPTargetSimdDirective &S) { 2748 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2749 emitOMPSimdRegion(CGF, S, Action); 2750 }; 2751 emitCommonOMPTargetDirective(*this, S, CodeGen); 2752 } 2753 2754 namespace { 2755 struct ScheduleKindModifiersTy { 2756 OpenMPScheduleClauseKind Kind; 2757 OpenMPScheduleClauseModifier M1; 2758 OpenMPScheduleClauseModifier M2; 2759 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2760 OpenMPScheduleClauseModifier M1, 2761 OpenMPScheduleClauseModifier M2) 2762 : Kind(Kind), M1(M1), M2(M2) {} 2763 }; 2764 } // namespace 2765 2766 bool CodeGenFunction::EmitOMPWorksharingLoop( 2767 const OMPLoopDirective &S, Expr *EUB, 2768 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2769 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2770 // Emit the loop iteration variable.
2771 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2772 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2773 EmitVarDecl(*IVDecl); 2774 2775 // Emit the iterations count variable. 2776 // If it is not a variable, Sema decided to calculate iterations count on each 2777 // iteration (e.g., it is foldable into a constant). 2778 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2779 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2780 // Emit calculation of the iterations count. 2781 EmitIgnoredExpr(S.getCalcLastIteration()); 2782 } 2783 2784 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 2785 2786 bool HasLastprivateClause; 2787 // Check pre-condition. 2788 { 2789 OMPLoopScope PreInitScope(*this, S); 2790 // Skip the entire loop if we don't meet the precondition. 2791 // If the condition constant folds and can be elided, avoid emitting the 2792 // whole loop. 2793 bool CondConstant; 2794 llvm::BasicBlock *ContBlock = nullptr; 2795 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2796 if (!CondConstant) 2797 return false; 2798 } else { 2799 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 2800 ContBlock = createBasicBlock("omp.precond.end"); 2801 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2802 getProfileCount(&S)); 2803 EmitBlock(ThenBlock); 2804 incrementProfileCounter(&S); 2805 } 2806 2807 RunCleanupsScope DoacrossCleanupScope(*this); 2808 bool Ordered = false; 2809 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2810 if (OrderedClause->getNumForLoops()) 2811 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations()); 2812 else 2813 Ordered = true; 2814 } 2815 2816 llvm::DenseSet<const Expr *> EmittedFinals; 2817 emitAlignedClause(*this, S); 2818 bool HasLinears = EmitOMPLinearClauseInit(S); 2819 // Emit helper vars inits. 2820 2821 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2822 LValue LB = Bounds.first; 2823 LValue UB = Bounds.second; 2824 LValue ST = 2825 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2826 LValue IL = 2827 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2828 2829 // Emit 'then' code. 2830 { 2831 OMPPrivateScope LoopScope(*this); 2832 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2833 // Emit implicit barrier to synchronize threads and avoid data races on 2834 // initialization of firstprivate variables and post-update of 2835 // lastprivate variables. 2836 CGM.getOpenMPRuntime().emitBarrierCall( 2837 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 2838 /*ForceSimpleCall=*/true); 2839 } 2840 EmitOMPPrivateClause(S, LoopScope); 2841 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion( 2842 *this, S, EmitLValue(S.getIterationVariable())); 2843 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2844 EmitOMPReductionClauseInit(S, LoopScope); 2845 EmitOMPPrivateLoopCounters(S, LoopScope); 2846 EmitOMPLinearClause(S, LoopScope); 2847 (void)LoopScope.Privatize(); 2848 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 2849 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 2850 2851 // Detect the loop schedule kind and chunk. 
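// E.g., 'schedule(monotonic: static, 4)' yields Schedule = static,
// M1 = monotonic and ChunkExpr = 4; without a schedule clause the default
// schedule and chunk for the target are queried from the runtime below.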
2852       const Expr *ChunkExpr = nullptr;
2853       OpenMPScheduleTy ScheduleKind;
2854       if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
2855         ScheduleKind.Schedule = C->getScheduleKind();
2856         ScheduleKind.M1 = C->getFirstScheduleModifier();
2857         ScheduleKind.M2 = C->getSecondScheduleModifier();
2858         ChunkExpr = C->getChunkSize();
2859       } else {
2860         // Default behaviour for the schedule clause.
2861         CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
2862             *this, S, ScheduleKind.Schedule, ChunkExpr);
2863       }
2864       bool HasChunkSizeOne = false;
2865       llvm::Value *Chunk = nullptr;
2866       if (ChunkExpr) {
2867         Chunk = EmitScalarExpr(ChunkExpr);
2868         Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
2869                                      S.getIterationVariable()->getType(),
2870                                      S.getBeginLoc());
2871         Expr::EvalResult Result;
2872         if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
2873           llvm::APSInt EvaluatedChunk = Result.Val.getInt();
2874           HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
2875         }
2876       }
2877       const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2878       const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2879       // OpenMP 4.5, 2.7.1 Loop Construct, Description.
2880       // If the static schedule kind is specified or if the ordered clause is
2881       // specified, and if no monotonic modifier is specified, the effect will
2882       // be as if the monotonic modifier was specified.
2883       bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
2884           /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
2885           isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
2886       bool IsMonotonic =
2887           Ordered ||
2888           ((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
2889             ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
2890            !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
2891              ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
2892           ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
2893           ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
2894       if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
2895                                  /* Chunked */ Chunk != nullptr) ||
2896            StaticChunkedOne) &&
2897           !Ordered) {
2898         JumpDest LoopExit =
2899             getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
2900         emitCommonSimdLoop(
2901             *this, S,
2902             [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
2903               if (isOpenMPSimdDirective(S.getDirectiveKind())) {
2904                 CGF.EmitOMPSimdInit(S, IsMonotonic);
2905               } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
2906                 if (C->getKind() == OMPC_ORDER_concurrent)
2907                   CGF.LoopStack.setParallel(/*Enable=*/true);
2908               }
2909             },
2910             [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
2911              &S, ScheduleKind, LoopExit,
2912              &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2913               // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
2914               // When no chunk_size is specified, the iteration space is divided
2915               // into chunks that are approximately equal in size, and at most
2916               // one chunk is distributed to each thread. Note that the size of
2917               // the chunks is unspecified in this case.
2918               CGOpenMPRuntime::StaticRTInput StaticInit(
2919                   IVSize, IVSigned, Ordered, IL.getAddress(CGF),
2920                   LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
2921                   StaticChunkedOne ?
Chunk : nullptr); 2922 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2923 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, 2924 StaticInit); 2925 // UB = min(UB, GlobalUB); 2926 if (!StaticChunkedOne) 2927 CGF.EmitIgnoredExpr(S.getEnsureUpperBound()); 2928 // IV = LB; 2929 CGF.EmitIgnoredExpr(S.getInit()); 2930 // For unchunked static schedule generate: 2931 // 2932 // while (idx <= UB) { 2933 // BODY; 2934 // ++idx; 2935 // } 2936 // 2937 // For static schedule with chunk one: 2938 // 2939 // while (IV <= PrevUB) { 2940 // BODY; 2941 // IV += ST; 2942 // } 2943 CGF.EmitOMPInnerLoop( 2944 S, LoopScope.requiresCleanups(), 2945 StaticChunkedOne ? S.getCombinedParForInDistCond() 2946 : S.getCond(), 2947 StaticChunkedOne ? S.getDistInc() : S.getInc(), 2948 [&S, LoopExit](CodeGenFunction &CGF) { 2949 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit); 2950 }, 2951 [](CodeGenFunction &) {}); 2952 }); 2953 EmitBlock(LoopExit.getBlock()); 2954 // Tell the runtime we are done. 2955 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2956 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 2957 S.getDirectiveKind()); 2958 }; 2959 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2960 } else { 2961 // Emit the outer loop, which requests its work chunk [LB..UB] from 2962 // runtime and runs the inner loop to process it. 2963 const OMPLoopArguments LoopArguments( 2964 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 2965 IL.getAddress(*this), Chunk, EUB); 2966 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2967 LoopArguments, CGDispatchBounds); 2968 } 2969 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2970 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 2971 return CGF.Builder.CreateIsNotNull( 2972 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2973 }); 2974 } 2975 EmitOMPReductionClauseFinal( 2976 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2977 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2978 : /*Parallel only*/ OMPD_parallel); 2979 // Emit post-update of the reduction variables if IsLastIter != 0. 2980 emitPostUpdateForReductionClause( 2981 *this, S, [IL, &S](CodeGenFunction &CGF) { 2982 return CGF.Builder.CreateIsNotNull( 2983 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2984 }); 2985 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2986 if (HasLastprivateClause) 2987 EmitOMPLastprivateClauseFinal( 2988 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2989 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 2990 } 2991 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) { 2992 return CGF.Builder.CreateIsNotNull( 2993 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 2994 }); 2995 DoacrossCleanupScope.ForceCleanup(); 2996 // We're now done with the loop, so jump to the continuation block. 2997 if (ContBlock) { 2998 EmitBranch(ContBlock); 2999 EmitBlock(ContBlock, /*IsFinished=*/true); 3000 } 3001 } 3002 return HasLastprivateClause; 3003 } 3004 3005 /// The following two functions generate expressions for the loop lower 3006 /// and upper bounds in case of static and dynamic (dispatch) schedule 3007 /// of the associated 'for' or 'distribute' loop. 
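/// For instance, under a static schedule the bounds are the '.omp.lb'/'.omp.ub'
/// helper variables that the runtime fills in (via __kmpc_for_static_init_* in
/// the default libomp runtime), whereas a dispatch schedule requests each chunk
/// from __kmpc_dispatch_next_* and only needs 0 and the iteration count.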
3008 static std::pair<LValue, LValue>
3009 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3010   const auto &LS = cast<OMPLoopDirective>(S);
3011   LValue LB =
3012       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3013   LValue UB =
3014       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3015   return {LB, UB};
3016 }
3017
3018 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
3019 /// consider the lower and upper bound expressions generated by the
3020 /// worksharing loop support, but we use 0 and the iteration space size as
3021 /// constants.
3022 static std::pair<llvm::Value *, llvm::Value *>
3023 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3024                           Address LB, Address UB) {
3025   const auto &LS = cast<OMPLoopDirective>(S);
3026   const Expr *IVExpr = LS.getIterationVariable();
3027   const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3028   llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3029   llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3030   return {LBVal, UBVal};
3031 }
3032
3033 /// Emits the code for the directive with inscan reductions.
3034 /// The code is the following:
3035 /// \code
3036 /// size num_iters = <num_iters>;
3037 /// <type> buffer[num_iters];
3038 /// #pragma omp ...
3039 /// for (i: 0..<num_iters>) {
3040 ///   <input phase>;
3041 ///   buffer[i] = red;
3042 /// }
3043 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3044 /// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3045 ///   buffer[cnt] op= buffer[cnt-pow(2,k)];
3046 /// #pragma omp ...
3047 /// for (0..<num_iters>) {
3048 ///   red = InclusiveScan ? buffer[i] : buffer[i-1];
3049 ///   <scan phase>;
3050 /// }
3051 /// \endcode
3052 static void emitScanBasedDirective(
3053     CodeGenFunction &CGF, const OMPLoopDirective &S,
3054     llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3055     llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3056     llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3057   llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3058       NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3059   SmallVector<const Expr *, 4> Shareds;
3060   SmallVector<const Expr *, 4> Privates;
3061   SmallVector<const Expr *, 4> ReductionOps;
3062   SmallVector<const Expr *, 4> LHSs;
3063   SmallVector<const Expr *, 4> RHSs;
3064   SmallVector<const Expr *, 4> CopyOps;
3065   SmallVector<const Expr *, 4> CopyArrayTemps;
3066   SmallVector<const Expr *, 4> CopyArrayElems;
3067   for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3068     assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3069            "Only inscan reductions are expected.");
3070     Shareds.append(C->varlist_begin(), C->varlist_end());
3071     Privates.append(C->privates().begin(), C->privates().end());
3072     ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3073     LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3074     RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3075     CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3076     CopyArrayTemps.append(C->copy_array_temps().begin(),
3077                           C->copy_array_temps().end());
3078     CopyArrayElems.append(C->copy_array_elems().begin(),
3079                           C->copy_array_elems().end());
3080   }
3081   {
3082     // Emit buffers for each reduction variable.
3083     // ReductionCodeGen is required to emit the code for array
3084     // reductions correctly.
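// (Roughly: for 'reduction(inscan, +: x)' over N iterations this materializes
// a VLA temp buffer 'x_buf[N]' per reduction item; the OpaqueValueMapping
// below binds each buffer's size expression to the iteration count computed
// above. 'x_buf' is only an illustrative name.)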
3085     ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3086     unsigned Count = 0;
3087     auto *ITA = CopyArrayTemps.begin();
3088     for (const Expr *IRef : Privates) {
3089       const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3090       // Emit variably modified arrays, used for arrays/array sections
3091       // reductions.
3092       if (PrivateVD->getType()->isVariablyModifiedType()) {
3093         RedCG.emitSharedOrigLValue(CGF, Count);
3094         RedCG.emitAggregateType(CGF, Count);
3095       }
3096       CodeGenFunction::OpaqueValueMapping DimMapping(
3097           CGF,
3098           cast<OpaqueValueExpr>(
3099               cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3100                   ->getSizeExpr()),
3101           RValue::get(OMPScanNumIterations));
3102       // Emit temp buffer.
3103       CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3104       ++ITA;
3105       ++Count;
3106     }
3107   }
3108   CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3109   {
3110     // Emit loop with input phase:
3111     // #pragma omp ...
3112     // for (i: 0..<num_iters>) {
3113     //   <input phase>;
3114     //   buffer[i] = red;
3115     // }
3116     CGF.OMPFirstScanLoop = true;
3117     CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3118     FirstGen(CGF);
3119   }
3120   // Emit prefix reduction:
3121   // for (int k = 0; k < ceil(log2(n)); ++k)
3122   llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3123   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3124   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3125   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3126   llvm::Value *Arg =
3127       CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3128   llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3129   F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3130   LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3131   LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3132   llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3133       OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3134   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3135   CGF.EmitBlock(LoopBB);
3136   auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3137   // size pow2k = 1;
3138   auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3139   Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3140   Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3141   // for (size i = n - 1; i >= 2 ^ k; --i)
3142   //   tmp[i] op= tmp[i-pow2k];
3143   llvm::BasicBlock *InnerLoopBB =
3144       CGF.createBasicBlock("omp.inner.log.scan.body");
3145   llvm::BasicBlock *InnerExitBB =
3146       CGF.createBasicBlock("omp.inner.log.scan.exit");
3147   llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3148   CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3149   CGF.EmitBlock(InnerLoopBB);
3150   auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3151   IVal->addIncoming(NMin1, LoopBB);
3152   {
3153     CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3154     auto *ILHS = LHSs.begin();
3155     auto *IRHS = RHSs.begin();
3156     for (const Expr *CopyArrayElem : CopyArrayElems) {
3157       const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3158       const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3159       Address LHSAddr = Address::invalid();
3160       {
3161         CodeGenFunction::OpaqueValueMapping IdxMapping(
3162             CGF,
3163             cast<OpaqueValueExpr>(
3164                 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3165             RValue::get(IVal));
3166         LHSAddr =
CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3167 } 3168 PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; }); 3169 Address RHSAddr = Address::invalid(); 3170 { 3171 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K); 3172 CodeGenFunction::OpaqueValueMapping IdxMapping( 3173 CGF, 3174 cast<OpaqueValueExpr>( 3175 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 3176 RValue::get(OffsetIVal)); 3177 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF); 3178 } 3179 PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; }); 3180 ++ILHS; 3181 ++IRHS; 3182 } 3183 PrivScope.Privatize(); 3184 CGF.CGM.getOpenMPRuntime().emitReduction( 3185 CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 3186 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown}); 3187 } 3188 llvm::Value *NextIVal = 3189 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1)); 3190 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock()); 3191 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K); 3192 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); 3193 CGF.EmitBlock(InnerExitBB); 3194 llvm::Value *Next = 3195 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1)); 3196 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock()); 3197 // pow2k <<= 1; 3198 llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); 3199 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock()); 3200 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal); 3201 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB); 3202 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc()); 3203 CGF.EmitBlock(ExitBB); 3204 3205 CGF.OMPFirstScanLoop = false; 3206 SecondGen(CGF); 3207 } 3208 3209 static bool emitWorksharingDirective(CodeGenFunction &CGF, 3210 const OMPLoopDirective &S, 3211 bool HasCancel) { 3212 bool HasLastprivates; 3213 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(), 3214 [](const OMPReductionClause *C) { 3215 return C->getModifier() == OMPC_REDUCTION_inscan; 3216 })) { 3217 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) { 3218 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF); 3219 OMPLoopScope LoopScope(CGF, S); 3220 return CGF.EmitScalarExpr(S.getNumIterations()); 3221 }; 3222 const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) { 3223 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3224 CGF, S.getDirectiveKind(), HasCancel); 3225 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3226 emitForLoopBounds, 3227 emitDispatchForLoopBounds); 3228 // Emit an implicit barrier at the end. 
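// (Every thread must have written its buffer elements before the log2-depth
// up-sweep reads entries produced by other threads.)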
3229 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), 3230 OMPD_for); 3231 }; 3232 const auto &&SecondGen = [&S, HasCancel, 3233 &HasLastprivates](CodeGenFunction &CGF) { 3234 CodeGenFunction::OMPCancelStackRAII CancelRegion( 3235 CGF, S.getDirectiveKind(), HasCancel); 3236 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3237 emitForLoopBounds, 3238 emitDispatchForLoopBounds); 3239 }; 3240 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen); 3241 } else { 3242 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(), 3243 HasCancel); 3244 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 3245 emitForLoopBounds, 3246 emitDispatchForLoopBounds); 3247 } 3248 return HasLastprivates; 3249 } 3250 3251 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 3252 bool HasLastprivates = false; 3253 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3254 PrePostActionTy &) { 3255 HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel()); 3256 }; 3257 { 3258 auto LPCRegion = 3259 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3260 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3261 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 3262 S.hasCancel()); 3263 } 3264 3265 // Emit an implicit barrier at the end. 3266 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3267 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3268 // Check for outer lastprivate conditional update. 3269 checkForLastprivateConditionalUpdate(*this, S); 3270 } 3271 3272 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 3273 bool HasLastprivates = false; 3274 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 3275 PrePostActionTy &) { 3276 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3277 }; 3278 { 3279 auto LPCRegion = 3280 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3281 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3282 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 3283 } 3284 3285 // Emit an implicit barrier at the end. 3286 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) 3287 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for); 3288 // Check for outer lastprivate conditional update. 3289 checkForLastprivateConditionalUpdate(*this, S); 3290 } 3291 3292 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 3293 const Twine &Name, 3294 llvm::Value *Init = nullptr) { 3295 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 3296 if (Init) 3297 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 3298 return LVal; 3299 } 3300 3301 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 3302 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt(); 3303 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt); 3304 bool HasLastprivates = false; 3305 auto &&CodeGen = [&S, CapturedStmt, CS, 3306 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) { 3307 const ASTContext &C = CGF.getContext(); 3308 QualType KmpInt32Ty = 3309 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 3310 // Emit helper vars inits. 3311 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 3312 CGF.Builder.getInt32(0)); 3313 llvm::ConstantInt *GlobalUBVal = CS != nullptr 3314 ? 
CGF.Builder.getInt32(CS->size() - 1) 3315 : CGF.Builder.getInt32(0); 3316 LValue UB = 3317 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 3318 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 3319 CGF.Builder.getInt32(1)); 3320 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 3321 CGF.Builder.getInt32(0)); 3322 // Loop counter. 3323 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 3324 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3325 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 3326 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue); 3327 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 3328 // Generate condition for loop. 3329 BinaryOperator *Cond = BinaryOperator::Create( 3330 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary, 3331 S.getBeginLoc(), FPOptionsOverride()); 3332 // Increment for loop counter. 3333 UnaryOperator *Inc = UnaryOperator::Create( 3334 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 3335 S.getBeginLoc(), true, FPOptionsOverride()); 3336 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) { 3337 // Iterate through all sections and emit a switch construct: 3338 // switch (IV) { 3339 // case 0: 3340 // <SectionStmt[0]>; 3341 // break; 3342 // ... 3343 // case <NumSection> - 1: 3344 // <SectionStmt[<NumSection> - 1]>; 3345 // break; 3346 // } 3347 // .omp.sections.exit: 3348 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 3349 llvm::SwitchInst *SwitchStmt = 3350 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()), 3351 ExitBB, CS == nullptr ? 1 : CS->size()); 3352 if (CS) { 3353 unsigned CaseNumber = 0; 3354 for (const Stmt *SubStmt : CS->children()) { 3355 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3356 CGF.EmitBlock(CaseBB); 3357 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 3358 CGF.EmitStmt(SubStmt); 3359 CGF.EmitBranch(ExitBB); 3360 ++CaseNumber; 3361 } 3362 } else { 3363 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case"); 3364 CGF.EmitBlock(CaseBB); 3365 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 3366 CGF.EmitStmt(CapturedStmt); 3367 CGF.EmitBranch(ExitBB); 3368 } 3369 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 3370 }; 3371 3372 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 3373 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 3374 // Emit implicit barrier to synchronize threads and avoid data races on 3375 // initialization of firstprivate variables and post-update of lastprivate 3376 // variables. 3377 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 3378 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 3379 /*ForceSimpleCall=*/true); 3380 } 3381 CGF.EmitOMPPrivateClause(S, LoopScope); 3382 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV); 3383 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 3384 CGF.EmitOMPReductionClauseInit(S, LoopScope); 3385 (void)LoopScope.Privatize(); 3386 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 3387 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 3388 3389 // Emit static non-chunked loop. 
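// For illustration, a construct such as:
//   #pragma omp sections
//   { /* three '#pragma omp section' blocks */ }
// is lowered as a static worksharing loop over IV in [0, 2], with the switch
// emitted above dispatching each IV value to one section body.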
3390 OpenMPScheduleTy ScheduleKind; 3391 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 3392 CGOpenMPRuntime::StaticRTInput StaticInit( 3393 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF), 3394 LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF)); 3395 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 3396 CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit); 3397 // UB = min(UB, GlobalUB); 3398 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc()); 3399 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect( 3400 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 3401 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 3402 // IV = LB; 3403 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV); 3404 // while (idx <= UB) { BODY; ++idx; } 3405 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen, 3406 [](CodeGenFunction &) {}); 3407 // Tell the runtime we are done. 3408 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 3409 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(), 3410 S.getDirectiveKind()); 3411 }; 3412 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 3413 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 3414 // Emit post-update of the reduction variables if IsLastIter != 0. 3415 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) { 3416 return CGF.Builder.CreateIsNotNull( 3417 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 3418 }); 3419 3420 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3421 if (HasLastprivates) 3422 CGF.EmitOMPLastprivateClauseFinal( 3423 S, /*NoFinals=*/false, 3424 CGF.Builder.CreateIsNotNull( 3425 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()))); 3426 }; 3427 3428 bool HasCancel = false; 3429 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 3430 HasCancel = OSD->hasCancel(); 3431 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 3432 HasCancel = OPSD->hasCancel(); 3433 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 3434 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 3435 HasCancel); 3436 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 3437 // clause. Otherwise the barrier will be generated by the codegen for the 3438 // directive. 3439 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 3440 // Emit implicit barrier to synchronize threads and avoid data races on 3441 // initialization of firstprivate variables. 3442 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3443 OMPD_unknown); 3444 } 3445 } 3446 3447 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 3448 { 3449 auto LPCRegion = 3450 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3451 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3452 EmitSections(S); 3453 } 3454 // Emit an implicit barrier at the end. 3455 if (!S.getSingleClause<OMPNowaitClause>()) { 3456 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), 3457 OMPD_sections); 3458 } 3459 // Check for outer lastprivate conditional update. 
3460 checkForLastprivateConditionalUpdate(*this, S); 3461 } 3462 3463 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 3464 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3465 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3466 }; 3467 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3468 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen, 3469 S.hasCancel()); 3470 } 3471 3472 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 3473 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 3474 llvm::SmallVector<const Expr *, 8> DestExprs; 3475 llvm::SmallVector<const Expr *, 8> SrcExprs; 3476 llvm::SmallVector<const Expr *, 8> AssignmentOps; 3477 // Check if there are any 'copyprivate' clauses associated with this 3478 // 'single' construct. 3479 // Build a list of copyprivate variables along with helper expressions 3480 // (<source>, <destination>, <destination>=<source> expressions) 3481 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 3482 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 3483 DestExprs.append(C->destination_exprs().begin(), 3484 C->destination_exprs().end()); 3485 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 3486 AssignmentOps.append(C->assignment_ops().begin(), 3487 C->assignment_ops().end()); 3488 } 3489 // Emit code for 'single' region along with 'copyprivate' clauses 3490 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3491 Action.Enter(CGF); 3492 OMPPrivateScope SingleScope(CGF); 3493 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 3494 CGF.EmitOMPPrivateClause(S, SingleScope); 3495 (void)SingleScope.Privatize(); 3496 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3497 }; 3498 { 3499 auto LPCRegion = 3500 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3501 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3502 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(), 3503 CopyprivateVars, DestExprs, 3504 SrcExprs, AssignmentOps); 3505 } 3506 // Emit an implicit barrier at the end (to avoid data race on firstprivate 3507 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 3508 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 3509 CGM.getOpenMPRuntime().emitBarrierCall( 3510 *this, S.getBeginLoc(), 3511 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 3512 } 3513 // Check for outer lastprivate conditional update. 
3514 checkForLastprivateConditionalUpdate(*this, S); 3515 } 3516 3517 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 3518 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3519 Action.Enter(CGF); 3520 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3521 }; 3522 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc()); 3523 } 3524 3525 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 3526 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3527 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3528 3529 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3530 const Stmt *MasterRegionBodyStmt = CS->getCapturedStmt(); 3531 3532 auto FiniCB = [this](InsertPointTy IP) { 3533 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3534 }; 3535 3536 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP, 3537 InsertPointTy CodeGenIP, 3538 llvm::BasicBlock &FiniBB) { 3539 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3540 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt, 3541 CodeGenIP, FiniBB); 3542 }; 3543 3544 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3545 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3546 Builder.restoreIP(OMPBuilder->CreateMaster(Builder, BodyGenCB, FiniCB)); 3547 3548 return; 3549 } 3550 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3551 emitMaster(*this, S); 3552 } 3553 3554 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 3555 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 3556 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy; 3557 3558 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 3559 const Stmt *CriticalRegionBodyStmt = CS->getCapturedStmt(); 3560 const Expr *Hint = nullptr; 3561 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3562 Hint = HintClause->getHint(); 3563 3564 // TODO: This is slightly different from what's currently being done in 3565 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything 3566 // about typing is final. 
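// (E.g., '#pragma omp critical (lck) hint(omp_sync_hint_contended)' evaluates
// the hint expression below so the builder can emit __kmpc_critical_with_hint
// rather than plain __kmpc_critical.)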
3567 llvm::Value *HintInst = nullptr; 3568 if (Hint) 3569 HintInst = 3570 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false); 3571 3572 auto FiniCB = [this](InsertPointTy IP) { 3573 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP); 3574 }; 3575 3576 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP, 3577 InsertPointTy CodeGenIP, 3578 llvm::BasicBlock &FiniBB) { 3579 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB); 3580 OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt, 3581 CodeGenIP, FiniBB); 3582 }; 3583 3584 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP); 3585 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI); 3586 Builder.restoreIP(OMPBuilder->CreateCritical( 3587 Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(), 3588 HintInst)); 3589 3590 return; 3591 } 3592 3593 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3594 Action.Enter(CGF); 3595 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 3596 }; 3597 const Expr *Hint = nullptr; 3598 if (const auto *HintClause = S.getSingleClause<OMPHintClause>()) 3599 Hint = HintClause->getHint(); 3600 OMPLexicalScope Scope(*this, S, OMPD_unknown); 3601 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 3602 S.getDirectiveName().getAsString(), 3603 CodeGen, S.getBeginLoc(), Hint); 3604 } 3605 3606 void CodeGenFunction::EmitOMPParallelForDirective( 3607 const OMPParallelForDirective &S) { 3608 // Emit directive as a combined directive that consists of two implicit 3609 // directives: 'parallel' with 'for' directive. 3610 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3611 Action.Enter(CGF); 3612 (void)emitWorksharingDirective(CGF, S, S.hasCancel()); 3613 }; 3614 { 3615 auto LPCRegion = 3616 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3617 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 3618 emitEmptyBoundParameters); 3619 } 3620 // Check for outer lastprivate conditional update. 3621 checkForLastprivateConditionalUpdate(*this, S); 3622 } 3623 3624 void CodeGenFunction::EmitOMPParallelForSimdDirective( 3625 const OMPParallelForSimdDirective &S) { 3626 // Emit directive as a combined directive that consists of two implicit 3627 // directives: 'parallel' with 'for' directive. 3628 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3629 Action.Enter(CGF); 3630 (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false); 3631 }; 3632 { 3633 auto LPCRegion = 3634 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 3635 emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen, 3636 emitEmptyBoundParameters); 3637 } 3638 // Check for outer lastprivate conditional update. 3639 checkForLastprivateConditionalUpdate(*this, S); 3640 } 3641 3642 void CodeGenFunction::EmitOMPParallelMasterDirective( 3643 const OMPParallelMasterDirective &S) { 3644 // Emit directive as a combined directive that consists of two implicit 3645 // directives: 'parallel' with 'master' directive. 
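// (Conceptually, '#pragma omp parallel master' behaves like a 'parallel'
// region whose body is immediately wrapped in a 'master' region; clauses
// such as copyin/firstprivate/reduction attach to the parallel part.)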
3646   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3647     Action.Enter(CGF);
3648     OMPPrivateScope PrivateScope(CGF);
3649     bool Copyins = CGF.EmitOMPCopyinClause(S);
3650     (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
3651     if (Copyins) {
3652       // Emit implicit barrier to synchronize threads and avoid data races on
3653       // propagation of the master thread's values of threadprivate variables
3654       // to the local instances of those variables in all other implicit threads.
3655       CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3656           CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3657           /*ForceSimpleCall=*/true);
3658     }
3659     CGF.EmitOMPPrivateClause(S, PrivateScope);
3660     CGF.EmitOMPReductionClauseInit(S, PrivateScope);
3661     (void)PrivateScope.Privatize();
3662     emitMaster(CGF, S);
3663     CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
3664   };
3665   {
3666     auto LPCRegion =
3667         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3668     emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
3669                                    emitEmptyBoundParameters);
3670     emitPostUpdateForReductionClause(*this, S,
3671                                      [](CodeGenFunction &) { return nullptr; });
3672   }
3673   // Check for outer lastprivate conditional update.
3674   checkForLastprivateConditionalUpdate(*this, S);
3675 }
3676
3677 void CodeGenFunction::EmitOMPParallelSectionsDirective(
3678     const OMPParallelSectionsDirective &S) {
3679   // Emit directive as a combined directive that consists of two implicit
3680   // directives: 'parallel' with 'sections' directive.
3681   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3682     Action.Enter(CGF);
3683     CGF.EmitSections(S);
3684   };
3685   {
3686     auto LPCRegion =
3687         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
3688     emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
3689                                    emitEmptyBoundParameters);
3690   }
3691   // Check for outer lastprivate conditional update.
3692   checkForLastprivateConditionalUpdate(*this, S);
3693 }
3694
3695 void CodeGenFunction::EmitOMPTaskBasedDirective(
3696     const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
3697     const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
3698     OMPTaskDataTy &Data) {
3699   // Emit outlined function for task construct.
3700   const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
3701   auto I = CS->getCapturedDecl()->param_begin();
3702   auto PartId = std::next(I);
3703   auto TaskT = std::next(I, 4);
3704   // Check if the task is final.
3705   if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
3706     // If the condition constant folds and can be elided, try to avoid emitting
3707     // the condition and the dead arm of the if/else.
3708     const Expr *Cond = Clause->getCondition();
3709     bool CondConstant;
3710     if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
3711       Data.Final.setInt(CondConstant);
3712     else
3713       Data.Final.setPointer(EvaluateExprAsBool(Cond));
3714   } else {
3715     // By default the task is not final.
3716     Data.Final.setInt(/*IntVal=*/false);
3717   }
3718   // Check if the task has a 'priority' clause.
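// (E.g., 'priority(3)' is converted to a 32-bit value and recorded in
// Data.Priority; the paired int/pointer encoding tracks whether the clause
// was actually specified, so the runtime hint is only passed when present.)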
3719 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 3720 const Expr *Prio = Clause->getPriority(); 3721 Data.Priority.setInt(/*IntVal=*/true); 3722 Data.Priority.setPointer(EmitScalarConversion( 3723 EmitScalarExpr(Prio), Prio->getType(), 3724 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 3725 Prio->getExprLoc())); 3726 } 3727 // The first function argument for tasks is a thread id, the second one is a 3728 // part id (0 for tied tasks, >=0 for untied task). 3729 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 3730 // Get list of private variables. 3731 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 3732 auto IRef = C->varlist_begin(); 3733 for (const Expr *IInit : C->private_copies()) { 3734 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3735 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3736 Data.PrivateVars.push_back(*IRef); 3737 Data.PrivateCopies.push_back(IInit); 3738 } 3739 ++IRef; 3740 } 3741 } 3742 EmittedAsPrivate.clear(); 3743 // Get list of firstprivate variables. 3744 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 3745 auto IRef = C->varlist_begin(); 3746 auto IElemInitRef = C->inits().begin(); 3747 for (const Expr *IInit : C->private_copies()) { 3748 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3749 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3750 Data.FirstprivateVars.push_back(*IRef); 3751 Data.FirstprivateCopies.push_back(IInit); 3752 Data.FirstprivateInits.push_back(*IElemInitRef); 3753 } 3754 ++IRef; 3755 ++IElemInitRef; 3756 } 3757 } 3758 // Get list of lastprivate variables (for taskloops). 3759 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 3760 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 3761 auto IRef = C->varlist_begin(); 3762 auto ID = C->destination_exprs().begin(); 3763 for (const Expr *IInit : C->private_copies()) { 3764 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 3765 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 3766 Data.LastprivateVars.push_back(*IRef); 3767 Data.LastprivateCopies.push_back(IInit); 3768 } 3769 LastprivateDstsOrigs.insert( 3770 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 3771 cast<DeclRefExpr>(*IRef)}); 3772 ++IRef; 3773 ++ID; 3774 } 3775 } 3776 SmallVector<const Expr *, 4> LHSs; 3777 SmallVector<const Expr *, 4> RHSs; 3778 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 3779 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 3780 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 3781 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 3782 Data.ReductionOps.append(C->reduction_ops().begin(), 3783 C->reduction_ops().end()); 3784 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 3785 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 3786 } 3787 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 3788 *this, S.getBeginLoc(), LHSs, RHSs, Data); 3789 // Build list of dependences. 
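// (E.g., 'depend(inout: a, b[0])' appends one DependData entry of kind
// OMPC_DEPEND_inout whose DepExprs are {a, b[0]}.)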
3790 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 3791 OMPTaskDataTy::DependData &DD = 3792 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 3793 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 3794 } 3795 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs, 3796 CapturedRegion](CodeGenFunction &CGF, 3797 PrePostActionTy &Action) { 3798 // Set proper addresses for generated private copies. 3799 OMPPrivateScope Scope(CGF); 3800 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs; 3801 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 3802 !Data.LastprivateVars.empty()) { 3803 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 3804 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 3805 enum { PrivatesParam = 2, CopyFnParam = 3 }; 3806 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 3807 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 3808 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 3809 CS->getCapturedDecl()->getParam(PrivatesParam))); 3810 // Map privates. 3811 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 3812 llvm::SmallVector<llvm::Value *, 16> CallArgs; 3813 CallArgs.push_back(PrivatesPtr); 3814 for (const Expr *E : Data.PrivateVars) { 3815 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3816 Address PrivatePtr = CGF.CreateMemTemp( 3817 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 3818 PrivatePtrs.emplace_back(VD, PrivatePtr); 3819 CallArgs.push_back(PrivatePtr.getPointer()); 3820 } 3821 for (const Expr *E : Data.FirstprivateVars) { 3822 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3823 Address PrivatePtr = 3824 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3825 ".firstpriv.ptr.addr"); 3826 PrivatePtrs.emplace_back(VD, PrivatePtr); 3827 FirstprivatePtrs.emplace_back(VD, PrivatePtr); 3828 CallArgs.push_back(PrivatePtr.getPointer()); 3829 } 3830 for (const Expr *E : Data.LastprivateVars) { 3831 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 3832 Address PrivatePtr = 3833 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 3834 ".lastpriv.ptr.addr"); 3835 PrivatePtrs.emplace_back(VD, PrivatePtr); 3836 CallArgs.push_back(PrivatePtr.getPointer()); 3837 } 3838 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 3839 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 3840 for (const auto &Pair : LastprivateDstsOrigs) { 3841 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 3842 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD), 3843 /*RefersToEnclosingVariableOrCapture=*/ 3844 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 3845 Pair.second->getType(), VK_LValue, 3846 Pair.second->getExprLoc()); 3847 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 3848 return CGF.EmitLValue(&DRE).getAddress(CGF); 3849 }); 3850 } 3851 for (const auto &Pair : PrivatePtrs) { 3852 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3853 CGF.getContext().getDeclAlign(Pair.first)); 3854 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 3855 } 3856 } 3857 if (Data.Reductions) { 3858 OMPPrivateScope FirstprivateScope(CGF); 3859 for (const auto &Pair : FirstprivatePtrs) { 3860 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 3861 CGF.getContext().getDeclAlign(Pair.first)); 3862 FirstprivateScope.addPrivate(Pair.first, 3863 [Replacement]() { return Replacement; 
                                     });
3864       }
3865       (void)FirstprivateScope.Privatize();
3866       OMPLexicalScope LexScope(CGF, S, CapturedRegion);
3867       ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
3868                              Data.ReductionCopies, Data.ReductionOps);
3869       llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
3870           CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
3871       for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
3872         RedCG.emitSharedOrigLValue(CGF, Cnt);
3873         RedCG.emitAggregateType(CGF, Cnt);
3874         // FIXME: This must be removed once the runtime library is fixed.
3875         // Emit required threadprivate variables for
3876         // initializer/combiner/finalizer.
3877         CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
3878                                                            RedCG, Cnt);
3879         Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
3880             CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
3881         Replacement =
3882             Address(CGF.EmitScalarConversion(
3883                         Replacement.getPointer(), CGF.getContext().VoidPtrTy,
3884                         CGF.getContext().getPointerType(
3885                             Data.ReductionCopies[Cnt]->getType()),
3886                         Data.ReductionCopies[Cnt]->getExprLoc()),
3887                     Replacement.getAlignment());
3888         Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
3889         Scope.addPrivate(RedCG.getBaseDecl(Cnt),
3890                          [Replacement]() { return Replacement; });
3891       }
3892     }
3893     // Privatize all private variables except for in_reduction items.
3894     (void)Scope.Privatize();
3895     SmallVector<const Expr *, 4> InRedVars;
3896     SmallVector<const Expr *, 4> InRedPrivs;
3897     SmallVector<const Expr *, 4> InRedOps;
3898     SmallVector<const Expr *, 4> TaskgroupDescriptors;
3899     for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
3900       auto IPriv = C->privates().begin();
3901       auto IRed = C->reduction_ops().begin();
3902       auto ITD = C->taskgroup_descriptors().begin();
3903       for (const Expr *Ref : C->varlists()) {
3904         InRedVars.emplace_back(Ref);
3905         InRedPrivs.emplace_back(*IPriv);
3906         InRedOps.emplace_back(*IRed);
3907         TaskgroupDescriptors.emplace_back(*ITD);
3908         std::advance(IPriv, 1);
3909         std::advance(IRed, 1);
3910         std::advance(ITD, 1);
3911       }
3912     }
3913     // Privatize in_reduction items here, because taskgroup descriptors must be
3914     // privatized earlier.
3915     OMPPrivateScope InRedScope(CGF);
3916     if (!InRedVars.empty()) {
3917       ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
3918       for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
3919         RedCG.emitSharedOrigLValue(CGF, Cnt);
3920         RedCG.emitAggregateType(CGF, Cnt);
3921         // The taskgroup descriptor variable is always implicit firstprivate and
3922         // privatized already during processing of the firstprivates.
3923         // FIXME: This must be removed once the runtime library is fixed.
3924         // Emit required threadprivate variables for
3925         // initializer/combiner/finalizer.
3926 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(), 3927 RedCG, Cnt); 3928 llvm::Value *ReductionsPtr; 3929 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) { 3930 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), 3931 TRExpr->getExprLoc()); 3932 } else { 3933 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); 3934 } 3935 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 3936 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 3937 Replacement = Address( 3938 CGF.EmitScalarConversion( 3939 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 3940 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()), 3941 InRedPrivs[Cnt]->getExprLoc()), 3942 Replacement.getAlignment()); 3943 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 3944 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), 3945 [Replacement]() { return Replacement; }); 3946 } 3947 } 3948 (void)InRedScope.Privatize(); 3949 3950 Action.Enter(CGF); 3951 BodyGen(CGF); 3952 }; 3953 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 3954 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied, 3955 Data.NumberOfParts); 3956 OMPLexicalScope Scope(*this, S, llvm::None, 3957 !isOpenMPParallelDirective(S.getDirectiveKind()) && 3958 !isOpenMPSimdDirective(S.getDirectiveKind())); 3959 TaskGen(*this, OutlinedFn, Data); 3960 } 3961 3962 static ImplicitParamDecl * 3963 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data, 3964 QualType Ty, CapturedDecl *CD, 3965 SourceLocation Loc) { 3966 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3967 ImplicitParamDecl::Other); 3968 auto *OrigRef = DeclRefExpr::Create( 3969 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD, 3970 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3971 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty, 3972 ImplicitParamDecl::Other); 3973 auto *PrivateRef = DeclRefExpr::Create( 3974 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD, 3975 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue); 3976 QualType ElemType = C.getBaseElementType(Ty); 3977 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType, 3978 ImplicitParamDecl::Other); 3979 auto *InitRef = DeclRefExpr::Create( 3980 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD, 3981 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue); 3982 PrivateVD->setInitStyle(VarDecl::CInit); 3983 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue, 3984 InitRef, /*BasePath=*/nullptr, 3985 VK_RValue)); 3986 Data.FirstprivateVars.emplace_back(OrigRef); 3987 Data.FirstprivateCopies.emplace_back(PrivateRef); 3988 Data.FirstprivateInits.emplace_back(InitRef); 3989 return OrigVD; 3990 } 3991 3992 void CodeGenFunction::EmitOMPTargetTaskBasedDirective( 3993 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, 3994 OMPTargetDataInfo &InputInfo) { 3995 // Emit outlined function for task construct. 3996 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 3997 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 3998 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 3999 auto I = CS->getCapturedDecl()->param_begin(); 4000 auto PartId = std::next(I); 4001 auto TaskT = std::next(I, 4); 4002 OMPTaskDataTy Data; 4003 // The task is not final. 
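// ('final' is a clause of explicit task constructs only; the implicit task
// wrapping a target region can never be final, so it is forced off here.)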
4004 Data.Final.setInt(/*IntVal=*/false); 4005 // Get list of firstprivate variables. 4006 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 4007 auto IRef = C->varlist_begin(); 4008 auto IElemInitRef = C->inits().begin(); 4009 for (auto *IInit : C->private_copies()) { 4010 Data.FirstprivateVars.push_back(*IRef); 4011 Data.FirstprivateCopies.push_back(IInit); 4012 Data.FirstprivateInits.push_back(*IElemInitRef); 4013 ++IRef; 4014 ++IElemInitRef; 4015 } 4016 } 4017 OMPPrivateScope TargetScope(*this); 4018 VarDecl *BPVD = nullptr; 4019 VarDecl *PVD = nullptr; 4020 VarDecl *SVD = nullptr; 4021 if (InputInfo.NumberOfTargetItems > 0) { 4022 auto *CD = CapturedDecl::Create( 4023 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0); 4024 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems); 4025 QualType BaseAndPointersType = getContext().getConstantArrayType( 4026 getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal, 4027 /*IndexTypeQuals=*/0); 4028 BPVD = createImplicitFirstprivateForType( 4029 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 4030 PVD = createImplicitFirstprivateForType( 4031 getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc()); 4032 QualType SizesType = getContext().getConstantArrayType( 4033 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1), 4034 ArrSize, nullptr, ArrayType::Normal, 4035 /*IndexTypeQuals=*/0); 4036 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD, 4037 S.getBeginLoc()); 4038 TargetScope.addPrivate( 4039 BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; }); 4040 TargetScope.addPrivate(PVD, 4041 [&InputInfo]() { return InputInfo.PointersArray; }); 4042 TargetScope.addPrivate(SVD, 4043 [&InputInfo]() { return InputInfo.SizesArray; }); 4044 } 4045 (void)TargetScope.Privatize(); 4046 // Build list of dependences. 4047 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 4048 OMPTaskDataTy::DependData &DD = 4049 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier()); 4050 DD.DepExprs.append(C->varlist_begin(), C->varlist_end()); 4051 } 4052 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, 4053 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) { 4054 // Set proper addresses for generated private copies. 4055 OMPPrivateScope Scope(CGF); 4056 if (!Data.FirstprivateVars.empty()) { 4057 llvm::FunctionType *CopyFnTy = llvm::FunctionType::get( 4058 CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true); 4059 enum { PrivatesParam = 2, CopyFnParam = 3 }; 4060 llvm::Value *CopyFn = CGF.Builder.CreateLoad( 4061 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam))); 4062 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar( 4063 CS->getCapturedDecl()->getParam(PrivatesParam))); 4064 // Map privates. 
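// (Protocol sketch: the runtime-generated copy function is called with one
// out-pointer per firstprivate; it returns the addresses of the private copies
// inside the task descriptor, which are then rebound to the original VarDecls
// through the private scope below.)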
4065 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 4066 llvm::SmallVector<llvm::Value *, 16> CallArgs; 4067 CallArgs.push_back(PrivatesPtr); 4068 for (const Expr *E : Data.FirstprivateVars) { 4069 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4070 Address PrivatePtr = 4071 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 4072 ".firstpriv.ptr.addr"); 4073 PrivatePtrs.emplace_back(VD, PrivatePtr); 4074 CallArgs.push_back(PrivatePtr.getPointer()); 4075 } 4076 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall( 4077 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs); 4078 for (const auto &Pair : PrivatePtrs) { 4079 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 4080 CGF.getContext().getDeclAlign(Pair.first)); 4081 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 4082 } 4083 } 4084 // Privatize all private variables except for in_reduction items. 4085 (void)Scope.Privatize(); 4086 if (InputInfo.NumberOfTargetItems > 0) { 4087 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP( 4088 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0); 4089 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP( 4090 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0); 4091 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP( 4092 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0); 4093 } 4094 4095 Action.Enter(CGF); 4096 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false); 4097 BodyGen(CGF); 4098 }; 4099 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 4100 S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true, 4101 Data.NumberOfParts); 4102 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0); 4103 IntegerLiteral IfCond(getContext(), TrueOrFalse, 4104 getContext().getIntTypeForBitwidth(32, /*Signed=*/0), 4105 SourceLocation()); 4106 4107 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn, 4108 SharedsTy, CapturedStruct, &IfCond, Data); 4109 } 4110 4111 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 4112 // Emit outlined function for task construct. 4113 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task); 4114 Address CapturedStruct = GenerateCapturedStmtArgument(*CS); 4115 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4116 const Expr *IfCond = nullptr; 4117 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4118 if (C->getNameModifier() == OMPD_unknown || 4119 C->getNameModifier() == OMPD_task) { 4120 IfCond = C->getCondition(); 4121 break; 4122 } 4123 } 4124 4125 OMPTaskDataTy Data; 4126 // Check if we should emit tied or untied task. 
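// (E.g., '#pragma omp task untied' clears Data.Tied; the part-id argument of
// the outlined function then lets the runtime resume an untied task at a
// different point, possibly on a different thread.)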
4127 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 4128 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 4129 CGF.EmitStmt(CS->getCapturedStmt()); 4130 }; 4131 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4132 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 4133 const OMPTaskDataTy &Data) { 4134 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn, 4135 SharedsTy, CapturedStruct, IfCond, 4136 Data); 4137 }; 4138 auto LPCRegion = 4139 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 4140 EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data); 4141 } 4142 4143 void CodeGenFunction::EmitOMPTaskyieldDirective( 4144 const OMPTaskyieldDirective &S) { 4145 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc()); 4146 } 4147 4148 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 4149 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier); 4150 } 4151 4152 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 4153 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc()); 4154 } 4155 4156 void CodeGenFunction::EmitOMPTaskgroupDirective( 4157 const OMPTaskgroupDirective &S) { 4158 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4159 Action.Enter(CGF); 4160 if (const Expr *E = S.getReductionRef()) { 4161 SmallVector<const Expr *, 4> LHSs; 4162 SmallVector<const Expr *, 4> RHSs; 4163 OMPTaskDataTy Data; 4164 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 4165 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end()); 4166 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end()); 4167 Data.ReductionCopies.append(C->privates().begin(), C->privates().end()); 4168 Data.ReductionOps.append(C->reduction_ops().begin(), 4169 C->reduction_ops().end()); 4170 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4171 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4172 } 4173 llvm::Value *ReductionDesc = 4174 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(), 4175 LHSs, RHSs, Data); 4176 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4177 CGF.EmitVarDecl(*VD); 4178 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 4179 /*Volatile=*/false, E->getType()); 4180 } 4181 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt()); 4182 }; 4183 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4184 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc()); 4185 } 4186 4187 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 4188 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>() 4189 ? 
llvm::AtomicOrdering::NotAtomic 4190 : llvm::AtomicOrdering::AcquireRelease; 4191 CGM.getOpenMPRuntime().emitFlush( 4192 *this, 4193 [&S]() -> ArrayRef<const Expr *> { 4194 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) 4195 return llvm::makeArrayRef(FlushClause->varlist_begin(), 4196 FlushClause->varlist_end()); 4197 return llvm::None; 4198 }(), 4199 S.getBeginLoc(), AO); 4200 } 4201 4202 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) { 4203 const auto *DO = S.getSingleClause<OMPDepobjClause>(); 4204 LValue DOLVal = EmitLValue(DO->getDepobj()); 4205 if (const auto *DC = S.getSingleClause<OMPDependClause>()) { 4206 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(), 4207 DC->getModifier()); 4208 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end()); 4209 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause( 4210 *this, Dependencies, DC->getBeginLoc()); 4211 EmitStoreOfScalar(DepAddr.getPointer(), DOLVal); 4212 return; 4213 } 4214 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) { 4215 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc()); 4216 return; 4217 } 4218 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) { 4219 CGM.getOpenMPRuntime().emitUpdateClause( 4220 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc()); 4221 return; 4222 } 4223 } 4224 4225 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) { 4226 if (!OMPParentLoopDirectiveForScan) 4227 return; 4228 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan; 4229 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>(); 4230 SmallVector<const Expr *, 4> Shareds; 4231 SmallVector<const Expr *, 4> Privates; 4232 SmallVector<const Expr *, 4> LHSs; 4233 SmallVector<const Expr *, 4> RHSs; 4234 SmallVector<const Expr *, 4> ReductionOps; 4235 SmallVector<const Expr *, 4> CopyOps; 4236 SmallVector<const Expr *, 4> CopyArrayTemps; 4237 SmallVector<const Expr *, 4> CopyArrayElems; 4238 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) { 4239 if (C->getModifier() != OMPC_REDUCTION_inscan) 4240 continue; 4241 Shareds.append(C->varlist_begin(), C->varlist_end()); 4242 Privates.append(C->privates().begin(), C->privates().end()); 4243 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 4244 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 4245 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 4246 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end()); 4247 CopyArrayTemps.append(C->copy_array_temps().begin(), 4248 C->copy_array_temps().end()); 4249 CopyArrayElems.append(C->copy_array_elems().begin(), 4250 C->copy_array_elems().end()); 4251 } 4252 if (ParentDir.getDirectiveKind() == OMPD_simd || 4253 (getLangOpts().OpenMPSimd && 4254 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) { 4255 // For simd directive and simd-based directives in simd only mode, use the 4256 // following codegen: 4257 // int x = 0; 4258 // #pragma omp simd reduction(inscan, +: x) 4259 // for (..) { 4260 // <first part> 4261 // #pragma omp scan inclusive(x) 4262 // <second part> 4263 // } 4264 // is transformed to: 4265 // int x = 0; 4266 // for (..) { 4267 // int x_priv = 0; 4268 // <first part> 4269 // x = x_priv + x; 4270 // x_priv = x; 4271 // <second part> 4272 // } 4273 // and 4274 // int x = 0; 4275 // #pragma omp simd reduction(inscan, +: x) 4276 // for (..) 
{ 4277 // <first part> 4278 // #pragma omp scan exclusive(x) 4279 // <second part> 4280 // } 4281 // to 4282 // int x = 0; 4283 // for (..) { 4284 // int x_priv = 0; 4285 // <second part> 4286 // int temp = x; 4287 // x = x_priv + x; 4288 // x_priv = temp; 4289 // <first part> 4290 // } 4291 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce"); 4292 EmitBranch(IsInclusive 4293 ? OMPScanReduce 4294 : BreakContinueStack.back().ContinueBlock.getBlock()); 4295 EmitBlock(OMPScanDispatch); 4296 { 4297 // New scope for correct construction/destruction of temp variables for 4298 // exclusive scan. 4299 LexicalScope Scope(*this, S.getSourceRange()); 4300 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock); 4301 EmitBlock(OMPScanReduce); 4302 if (!IsInclusive) { 4303 // Create temp var and copy LHS value to this temp value. 4304 // TMP = LHS; 4305 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4306 const Expr *PrivateExpr = Privates[I]; 4307 const Expr *TempExpr = CopyArrayTemps[I]; 4308 EmitAutoVarDecl( 4309 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl())); 4310 LValue DestLVal = EmitLValue(TempExpr); 4311 LValue SrcLVal = EmitLValue(LHSs[I]); 4312 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4313 SrcLVal.getAddress(*this), 4314 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4315 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4316 CopyOps[I]); 4317 } 4318 } 4319 CGM.getOpenMPRuntime().emitReduction( 4320 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps, 4321 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd}); 4322 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4323 const Expr *PrivateExpr = Privates[I]; 4324 LValue DestLVal; 4325 LValue SrcLVal; 4326 if (IsInclusive) { 4327 DestLVal = EmitLValue(RHSs[I]); 4328 SrcLVal = EmitLValue(LHSs[I]); 4329 } else { 4330 const Expr *TempExpr = CopyArrayTemps[I]; 4331 DestLVal = EmitLValue(RHSs[I]); 4332 SrcLVal = EmitLValue(TempExpr); 4333 } 4334 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4335 SrcLVal.getAddress(*this), 4336 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4337 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4338 CopyOps[I]); 4339 } 4340 } 4341 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock); 4342 OMPScanExitBlock = IsInclusive 4343 ? BreakContinueStack.back().ContinueBlock.getBlock() 4344 : OMPScanReduce; 4345 EmitBlock(OMPAfterScanBlock); 4346 return; 4347 } 4348 if (!IsInclusive) { 4349 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4350 EmitBlock(OMPScanExitBlock); 4351 } 4352 if (OMPFirstScanLoop) { 4353 // Emit buffer[i] = red; at the end of the input phase. 
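// For this non-simd path the parent loop body is emitted twice: the first
// pass (OMPFirstScanLoop) runs only the input phase and stores each
// iteration's partial reduction into a temporary copy array, a prefix
// reduction is performed over that array in between, and the second pass
// reads the accumulated value back before the scan phase. Roughly:
//   pass 1: buffer[i] = red;   // after <first part>
//   ... prefix-reduce buffer ...
//   pass 2: red = buffer[i];   // buffer[i-1] for exclusive scan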
4354 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4355 .getIterationVariable() 4356 ->IgnoreParenImpCasts(); 4357 LValue IdxLVal = EmitLValue(IVExpr); 4358 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4359 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4360 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4361 const Expr *PrivateExpr = Privates[I]; 4362 const Expr *OrigExpr = Shareds[I]; 4363 const Expr *CopyArrayElem = CopyArrayElems[I]; 4364 OpaqueValueMapping IdxMapping( 4365 *this, 4366 cast<OpaqueValueExpr>( 4367 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4368 RValue::get(IdxVal)); 4369 LValue DestLVal = EmitLValue(CopyArrayElem); 4370 LValue SrcLVal = EmitLValue(OrigExpr); 4371 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4372 SrcLVal.getAddress(*this), 4373 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4374 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4375 CopyOps[I]); 4376 } 4377 } 4378 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4379 if (IsInclusive) { 4380 EmitBlock(OMPScanExitBlock); 4381 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock()); 4382 } 4383 EmitBlock(OMPScanDispatch); 4384 if (!OMPFirstScanLoop) { 4385 // Emit red = buffer[i]; at the entrance to the scan phase. 4386 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir) 4387 .getIterationVariable() 4388 ->IgnoreParenImpCasts(); 4389 LValue IdxLVal = EmitLValue(IVExpr); 4390 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc()); 4391 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false); 4392 llvm::BasicBlock *ExclusiveExitBB = nullptr; 4393 if (!IsInclusive) { 4394 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec"); 4395 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit"); 4396 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal); 4397 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB); 4398 EmitBlock(ContBB); 4399 // Use idx - 1 iteration for exclusive scan. 4400 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1)); 4401 } 4402 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) { 4403 const Expr *PrivateExpr = Privates[I]; 4404 const Expr *OrigExpr = Shareds[I]; 4405 const Expr *CopyArrayElem = CopyArrayElems[I]; 4406 OpaqueValueMapping IdxMapping( 4407 *this, 4408 cast<OpaqueValueExpr>( 4409 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()), 4410 RValue::get(IdxVal)); 4411 LValue SrcLVal = EmitLValue(CopyArrayElem); 4412 LValue DestLVal = EmitLValue(OrigExpr); 4413 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this), 4414 SrcLVal.getAddress(*this), 4415 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()), 4416 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), 4417 CopyOps[I]); 4418 } 4419 if (!IsInclusive) { 4420 EmitBlock(ExclusiveExitBB); 4421 } 4422 } 4423 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock 4424 : OMPAfterScanBlock); 4425 EmitBlock(OMPAfterScanBlock); 4426 } 4427 4428 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 4429 const CodeGenLoopTy &CodeGenLoop, 4430 Expr *IncExpr) { 4431 // Emit the loop iteration variable. 4432 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 4433 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl()); 4434 EmitVarDecl(*IVDecl); 4435 4436 // Emit the iterations count variable. 
4437 // If it is not a variable, Sema decided to calculate iterations count on each 4438 // iteration (e.g., it is foldable into a constant). 4439 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 4440 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 4441 // Emit calculation of the iterations count. 4442 EmitIgnoredExpr(S.getCalcLastIteration()); 4443 } 4444 4445 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime(); 4446 4447 bool HasLastprivateClause = false; 4448 // Check pre-condition. 4449 { 4450 OMPLoopScope PreInitScope(*this, S); 4451 // Skip the entire loop if we don't meet the precondition. 4452 // If the condition constant folds and can be elided, avoid emitting the 4453 // whole loop. 4454 bool CondConstant; 4455 llvm::BasicBlock *ContBlock = nullptr; 4456 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 4457 if (!CondConstant) 4458 return; 4459 } else { 4460 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then"); 4461 ContBlock = createBasicBlock("omp.precond.end"); 4462 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 4463 getProfileCount(&S)); 4464 EmitBlock(ThenBlock); 4465 incrementProfileCounter(&S); 4466 } 4467 4468 emitAlignedClause(*this, S); 4469 // Emit 'then' code. 4470 { 4471 // Emit helper vars inits. 4472 4473 LValue LB = EmitOMPHelperVar( 4474 *this, cast<DeclRefExpr>( 4475 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4476 ? S.getCombinedLowerBoundVariable() 4477 : S.getLowerBoundVariable()))); 4478 LValue UB = EmitOMPHelperVar( 4479 *this, cast<DeclRefExpr>( 4480 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4481 ? S.getCombinedUpperBoundVariable() 4482 : S.getUpperBoundVariable()))); 4483 LValue ST = 4484 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 4485 LValue IL = 4486 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 4487 4488 OMPPrivateScope LoopScope(*this); 4489 if (EmitOMPFirstprivateClause(S, LoopScope)) { 4490 // Emit implicit barrier to synchronize threads and avoid data races 4491 // on initialization of firstprivate variables and post-update of 4492 // lastprivate variables. 4493 CGM.getOpenMPRuntime().emitBarrierCall( 4494 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false, 4495 /*ForceSimpleCall=*/true); 4496 } 4497 EmitOMPPrivateClause(S, LoopScope); 4498 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 4499 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4500 !isOpenMPTeamsDirective(S.getDirectiveKind())) 4501 EmitOMPReductionClauseInit(S, LoopScope); 4502 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 4503 EmitOMPPrivateLoopCounters(S, LoopScope); 4504 (void)LoopScope.Privatize(); 4505 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 4506 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S); 4507 4508 // Detect the distribute schedule kind and chunk. 4509 llvm::Value *Chunk = nullptr; 4510 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 4511 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 4512 ScheduleKind = C->getDistScheduleKind(); 4513 if (const Expr *Ch = C->getChunkSize()) { 4514 Chunk = EmitScalarExpr(Ch); 4515 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 4516 S.getIterationVariable()->getType(), 4517 S.getBeginLoc()); 4518 } 4519 } else { 4520 // Default behaviour for dist_schedule clause. 
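// Without a dist_schedule clause the default is target-specific: host
// runtimes typically leave the schedule kind unset, which the checks below
// treat as an unchunked static schedule, while device runtimes may request
// a chunked static schedule here so that teams interleave chunks of the
// iteration space.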
4521 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk( 4522 *this, S, ScheduleKind, Chunk); 4523 } 4524 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 4525 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 4526 4527 // OpenMP [2.10.8, distribute Construct, Description] 4528 // If dist_schedule is specified, kind must be static. If specified, 4529 // iterations are divided into chunks of size chunk_size, chunks are 4530 // assigned to the teams of the league in a round-robin fashion in the 4531 // order of the team number. When no chunk_size is specified, the 4532 // iteration space is divided into chunks that are approximately equal 4533 // in size, and at most one chunk is distributed to each team of the 4534 // league. The size of the chunks is unspecified in this case. 4535 bool StaticChunked = RT.isStaticChunked( 4536 ScheduleKind, /* Chunked */ Chunk != nullptr) && 4537 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()); 4538 if (RT.isStaticNonchunked(ScheduleKind, 4539 /* Chunked */ Chunk != nullptr) || 4540 StaticChunked) { 4541 CGOpenMPRuntime::StaticRTInput StaticInit( 4542 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this), 4543 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 4544 StaticChunked ? Chunk : nullptr); 4545 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, 4546 StaticInit); 4547 JumpDest LoopExit = 4548 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 4549 // UB = min(UB, GlobalUB); 4550 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4551 ? S.getCombinedEnsureUpperBound() 4552 : S.getEnsureUpperBound()); 4553 // IV = LB; 4554 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4555 ? S.getCombinedInit() 4556 : S.getInit()); 4557 4558 const Expr *Cond = 4559 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 4560 ? S.getCombinedCond() 4561 : S.getCond(); 4562 4563 if (StaticChunked) 4564 Cond = S.getCombinedDistCond(); 4565 4566 // For static unchunked schedules generate: 4567 // 4568 // 1. For distribute alone, codegen 4569 // while (idx <= UB) { 4570 // BODY; 4571 // ++idx; 4572 // } 4573 // 4574 // 2. When combined with 'for' (e.g. as in 'distribute parallel for') 4575 // while (idx <= UB) { 4576 // <CodeGen rest of pragma>(LB, UB); 4577 // idx += ST; 4578 // } 4579 // 4580 // For static chunk one schedule generate: 4581 // 4582 // while (IV <= GlobalUB) { 4583 // <CodeGen rest of pragma>(LB, UB); 4584 // LB += ST; 4585 // UB += ST; 4586 // UB = min(UB, GlobalUB); 4587 // IV = LB; 4588 // } 4589 // 4590 emitCommonSimdLoop( 4591 *this, S, 4592 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4593 if (isOpenMPSimdDirective(S.getDirectiveKind())) 4594 CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true); 4595 }, 4596 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop, 4597 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) { 4598 CGF.EmitOMPInnerLoop( 4599 S, LoopScope.requiresCleanups(), Cond, IncExpr, 4600 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 4601 CodeGenLoop(CGF, S, LoopExit); 4602 }, 4603 [&S, StaticChunked](CodeGenFunction &CGF) { 4604 if (StaticChunked) { 4605 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound()); 4606 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound()); 4607 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound()); 4608 CGF.EmitIgnoredExpr(S.getCombinedInit()); 4609 } 4610 }); 4611 }); 4612 EmitBlock(LoopExit.getBlock()); 4613 // Tell the runtime we are done. 
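// (emitForStaticFinish lowers to a __kmpc_for_static_fini runtime call;
// the directive kind only affects the ident_t flags passed along.)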
4614 RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind()); 4615 } else { 4616 // Emit the outer loop, which requests its work chunk [LB..UB] from 4617 // runtime and runs the inner loop to process it. 4618 const OMPLoopArguments LoopArguments = { 4619 LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this), 4620 IL.getAddress(*this), Chunk}; 4621 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 4622 CodeGenLoop); 4623 } 4624 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 4625 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) { 4626 return CGF.Builder.CreateIsNotNull( 4627 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 4628 }); 4629 } 4630 if (isOpenMPSimdDirective(S.getDirectiveKind()) && 4631 !isOpenMPParallelDirective(S.getDirectiveKind()) && 4632 !isOpenMPTeamsDirective(S.getDirectiveKind())) { 4633 EmitOMPReductionClauseFinal(S, OMPD_simd); 4634 // Emit post-update of the reduction variables if IsLastIter != 0. 4635 emitPostUpdateForReductionClause( 4636 *this, S, [IL, &S](CodeGenFunction &CGF) { 4637 return CGF.Builder.CreateIsNotNull( 4638 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())); 4639 }); 4640 } 4641 // Emit final copy of the lastprivate variables if IsLastIter != 0. 4642 if (HasLastprivateClause) { 4643 EmitOMPLastprivateClauseFinal( 4644 S, /*NoFinals=*/false, 4645 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc()))); 4646 } 4647 } 4648 4649 // We're now done with the loop, so jump to the continuation block. 4650 if (ContBlock) { 4651 EmitBranch(ContBlock); 4652 EmitBlock(ContBlock, true); 4653 } 4654 } 4655 } 4656 4657 void CodeGenFunction::EmitOMPDistributeDirective( 4658 const OMPDistributeDirective &S) { 4659 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 4660 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 4661 }; 4662 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4663 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen); 4664 } 4665 4666 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 4667 const CapturedStmt *S, 4668 SourceLocation Loc) { 4669 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 4670 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 4671 CGF.CapturedStmtInfo = &CapStmtInfo; 4672 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc); 4673 Fn->setDoesNotRecurse(); 4674 return Fn; 4675 } 4676 4677 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 4678 if (S.hasClausesOfKind<OMPDependClause>()) { 4679 assert(!S.getAssociatedStmt() && 4680 "No associated statement must be in ordered depend construct."); 4681 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 4682 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 4683 return; 4684 } 4685 const auto *C = S.getSingleClause<OMPSIMDClause>(); 4686 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 4687 PrePostActionTy &Action) { 4688 const CapturedStmt *CS = S.getInnermostCapturedStmt(); 4689 if (C) { 4690 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 4691 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 4692 llvm::Function *OutlinedFn = 4693 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc()); 4694 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(), 4695 OutlinedFn, CapturedVars); 4696 } else { 4697 Action.Enter(CGF); 4698 CGF.EmitStmt(CS->getCapturedStmt()); 4699 } 4700 }; 4701 OMPLexicalScope Scope(*this, S, OMPD_unknown); 4702 CGM.getOpenMPRuntime().emitOrderedRegion(*this, 
CodeGen, S.getBeginLoc(), !C); 4703 } 4704 4705 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 4706 QualType SrcType, QualType DestType, 4707 SourceLocation Loc) { 4708 assert(CGF.hasScalarEvaluationKind(DestType) && 4709 "DestType must have scalar evaluation kind."); 4710 assert(!Val.isAggregate() && "Must be a scalar or complex."); 4711 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 4712 DestType, Loc) 4713 : CGF.EmitComplexToScalarConversion( 4714 Val.getComplexVal(), SrcType, DestType, Loc); 4715 } 4716 4717 static CodeGenFunction::ComplexPairTy 4718 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 4719 QualType DestType, SourceLocation Loc) { 4720 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 4721 "DestType must have complex evaluation kind."); 4722 CodeGenFunction::ComplexPairTy ComplexVal; 4723 if (Val.isScalar()) { 4724 // Convert the input element to the element type of the complex. 4725 QualType DestElementType = 4726 DestType->castAs<ComplexType>()->getElementType(); 4727 llvm::Value *ScalarVal = CGF.EmitScalarConversion( 4728 Val.getScalarVal(), SrcType, DestElementType, Loc); 4729 ComplexVal = CodeGenFunction::ComplexPairTy( 4730 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 4731 } else { 4732 assert(Val.isComplex() && "Must be a scalar or complex."); 4733 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 4734 QualType DestElementType = 4735 DestType->castAs<ComplexType>()->getElementType(); 4736 ComplexVal.first = CGF.EmitScalarConversion( 4737 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 4738 ComplexVal.second = CGF.EmitScalarConversion( 4739 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 4740 } 4741 return ComplexVal; 4742 } 4743 4744 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4745 LValue LVal, RValue RVal) { 4746 if (LVal.isGlobalReg()) 4747 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 4748 else 4749 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false); 4750 } 4751 4752 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF, 4753 llvm::AtomicOrdering AO, LValue LVal, 4754 SourceLocation Loc) { 4755 if (LVal.isGlobalReg()) 4756 return CGF.EmitLoadOfLValue(LVal, Loc); 4757 return CGF.EmitAtomicLoad( 4758 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO), 4759 LVal.isVolatile()); 4760 } 4761 4762 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 4763 QualType RValTy, SourceLocation Loc) { 4764 switch (getEvaluationKind(LVal.getType())) { 4765 case TEK_Scalar: 4766 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 4767 *this, RVal, RValTy, LVal.getType(), Loc)), 4768 LVal); 4769 break; 4770 case TEK_Complex: 4771 EmitStoreOfComplex( 4772 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 4773 /*isInit=*/false); 4774 break; 4775 case TEK_Aggregate: 4776 llvm_unreachable("Must be a scalar or complex."); 4777 } 4778 } 4779 4780 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO, 4781 const Expr *X, const Expr *V, 4782 SourceLocation Loc) { 4783 // v = x; 4784 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 4785 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 4786 LValue XLValue = CGF.EmitLValue(X); 4787 LValue VLValue = CGF.EmitLValue(V); 4788 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc); 4789 // OpenMP, 
2.17.7, atomic Construct 4790 // If the read or capture clause is specified and the acquire, acq_rel, or 4791 // seq_cst clause is specified then the strong flush on exit from the atomic 4792 // operation is also an acquire flush. 4793 switch (AO) { 4794 case llvm::AtomicOrdering::Acquire: 4795 case llvm::AtomicOrdering::AcquireRelease: 4796 case llvm::AtomicOrdering::SequentiallyConsistent: 4797 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4798 llvm::AtomicOrdering::Acquire); 4799 break; 4800 case llvm::AtomicOrdering::Monotonic: 4801 case llvm::AtomicOrdering::Release: 4802 break; 4803 case llvm::AtomicOrdering::NotAtomic: 4804 case llvm::AtomicOrdering::Unordered: 4805 llvm_unreachable("Unexpected ordering."); 4806 } 4807 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 4808 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 4809 } 4810 4811 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF, 4812 llvm::AtomicOrdering AO, const Expr *X, 4813 const Expr *E, SourceLocation Loc) { 4814 // x = expr; 4815 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 4816 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 4817 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4818 // OpenMP, 2.17.7, atomic Construct 4819 // If the write, update, or capture clause is specified and the release, 4820 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4821 // the atomic operation is also a release flush. 4822 switch (AO) { 4823 case llvm::AtomicOrdering::Release: 4824 case llvm::AtomicOrdering::AcquireRelease: 4825 case llvm::AtomicOrdering::SequentiallyConsistent: 4826 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4827 llvm::AtomicOrdering::Release); 4828 break; 4829 case llvm::AtomicOrdering::Acquire: 4830 case llvm::AtomicOrdering::Monotonic: 4831 break; 4832 case llvm::AtomicOrdering::NotAtomic: 4833 case llvm::AtomicOrdering::Unordered: 4834 llvm_unreachable("Unexpected ordering."); 4835 } 4836 } 4837 4838 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 4839 RValue Update, 4840 BinaryOperatorKind BO, 4841 llvm::AtomicOrdering AO, 4842 bool IsXLHSInRHSPart) { 4843 ASTContext &Context = CGF.getContext(); 4844 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 4845 // expression is simple and atomic is allowed for the given type for the 4846 // target platform. 
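// E.g. given 'int x;', the update
//   #pragma omp atomic
//   x += n;
// is lowered to a single 'atomicrmw add' instruction. Updates rejected by
// the checks below (non-integer values, non-simple l-values, unsupported
// operators) fall back to the compare-exchange path in the caller.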
4847 if (BO == BO_Comma || !Update.isScalar() || 4848 !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() || 4849 (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 4850 (Update.getScalarVal()->getType() != 4851 X.getAddress(CGF).getElementType())) || 4852 !X.getAddress(CGF).getElementType()->isIntegerTy() || 4853 !Context.getTargetInfo().hasBuiltinAtomic( 4854 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 4855 return std::make_pair(false, RValue::get(nullptr)); 4856 4857 llvm::AtomicRMWInst::BinOp RMWOp; 4858 switch (BO) { 4859 case BO_Add: 4860 RMWOp = llvm::AtomicRMWInst::Add; 4861 break; 4862 case BO_Sub: 4863 if (!IsXLHSInRHSPart) 4864 return std::make_pair(false, RValue::get(nullptr)); 4865 RMWOp = llvm::AtomicRMWInst::Sub; 4866 break; 4867 case BO_And: 4868 RMWOp = llvm::AtomicRMWInst::And; 4869 break; 4870 case BO_Or: 4871 RMWOp = llvm::AtomicRMWInst::Or; 4872 break; 4873 case BO_Xor: 4874 RMWOp = llvm::AtomicRMWInst::Xor; 4875 break; 4876 case BO_LT: 4877 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4878 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 4879 : llvm::AtomicRMWInst::Max) 4880 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 4881 : llvm::AtomicRMWInst::UMax); 4882 break; 4883 case BO_GT: 4884 RMWOp = X.getType()->hasSignedIntegerRepresentation() 4885 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 4886 : llvm::AtomicRMWInst::Min) 4887 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax 4888 : llvm::AtomicRMWInst::UMin); 4889 break; 4890 case BO_Assign: 4891 RMWOp = llvm::AtomicRMWInst::Xchg; 4892 break; 4893 case BO_Mul: 4894 case BO_Div: 4895 case BO_Rem: 4896 case BO_Shl: 4897 case BO_Shr: 4898 case BO_LAnd: 4899 case BO_LOr: 4900 return std::make_pair(false, RValue::get(nullptr)); 4901 case BO_PtrMemD: 4902 case BO_PtrMemI: 4903 case BO_LE: 4904 case BO_GE: 4905 case BO_EQ: 4906 case BO_NE: 4907 case BO_Cmp: 4908 case BO_AddAssign: 4909 case BO_SubAssign: 4910 case BO_AndAssign: 4911 case BO_OrAssign: 4912 case BO_XorAssign: 4913 case BO_MulAssign: 4914 case BO_DivAssign: 4915 case BO_RemAssign: 4916 case BO_ShlAssign: 4917 case BO_ShrAssign: 4918 case BO_Comma: 4919 llvm_unreachable("Unsupported atomic update operation"); 4920 } 4921 llvm::Value *UpdateVal = Update.getScalarVal(); 4922 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 4923 UpdateVal = CGF.Builder.CreateIntCast( 4924 IC, X.getAddress(CGF).getElementType(), 4925 X.getType()->hasSignedIntegerRepresentation()); 4926 } 4927 llvm::Value *Res = 4928 CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO); 4929 return std::make_pair(true, RValue::get(Res)); 4930 } 4931 4932 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 4933 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 4934 llvm::AtomicOrdering AO, SourceLocation Loc, 4935 const llvm::function_ref<RValue(RValue)> CommonGen) { 4936 // Update expressions are allowed to have the following forms: 4937 // x binop= expr; -> xrval + expr; 4938 // x++, ++x -> xrval + 1; 4939 // x--, --x -> xrval - 1; 4940 // x = x binop expr; -> xrval binop expr 4941 // x = expr Op x; - > expr binop xrval; 4942 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 4943 if (!Res.first) { 4944 if (X.isGlobalReg()) { 4945 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 4946 // 'xrval'. 4947 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 4948 } else { 4949 // Perform compare-and-swap procedure. 
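// EmitAtomicUpdate expands to the usual compare-exchange retry loop,
// roughly:
//   old = atomic load of x
//   do {
//     desired = CommonGen(old); // 'xrval' binop 'expr' or 'expr' binop 'xrval'
//   } while (!cmpxchg(x, old, desired));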
4950 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 4951 } 4952 } 4953 return Res; 4954 } 4955 4956 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF, 4957 llvm::AtomicOrdering AO, const Expr *X, 4958 const Expr *E, const Expr *UE, 4959 bool IsXLHSInRHSPart, SourceLocation Loc) { 4960 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 4961 "Update expr in 'atomic update' must be a binary operator."); 4962 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 4963 // Update expressions are allowed to have the following forms: 4964 // x binop= expr; -> xrval + expr; 4965 // x++, ++x -> xrval + 1; 4966 // x--, --x -> xrval - 1; 4967 // x = x binop expr; -> xrval binop expr 4968 // x = expr Op x; - > expr binop xrval; 4969 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 4970 LValue XLValue = CGF.EmitLValue(X); 4971 RValue ExprRValue = CGF.EmitAnyExpr(E); 4972 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 4973 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 4974 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 4975 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 4976 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) { 4977 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 4978 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 4979 return CGF.EmitAnyExpr(UE); 4980 }; 4981 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 4982 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 4983 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 4984 // OpenMP, 2.17.7, atomic Construct 4985 // If the write, update, or capture clause is specified and the release, 4986 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 4987 // the atomic operation is also a release flush. 
4988 switch (AO) { 4989 case llvm::AtomicOrdering::Release: 4990 case llvm::AtomicOrdering::AcquireRelease: 4991 case llvm::AtomicOrdering::SequentiallyConsistent: 4992 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 4993 llvm::AtomicOrdering::Release); 4994 break; 4995 case llvm::AtomicOrdering::Acquire: 4996 case llvm::AtomicOrdering::Monotonic: 4997 break; 4998 case llvm::AtomicOrdering::NotAtomic: 4999 case llvm::AtomicOrdering::Unordered: 5000 llvm_unreachable("Unexpected ordering."); 5001 } 5002 } 5003 5004 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 5005 QualType SourceType, QualType ResType, 5006 SourceLocation Loc) { 5007 switch (CGF.getEvaluationKind(ResType)) { 5008 case TEK_Scalar: 5009 return RValue::get( 5010 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 5011 case TEK_Complex: { 5012 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 5013 return RValue::getComplex(Res.first, Res.second); 5014 } 5015 case TEK_Aggregate: 5016 break; 5017 } 5018 llvm_unreachable("Must be a scalar or complex."); 5019 } 5020 5021 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF, 5022 llvm::AtomicOrdering AO, 5023 bool IsPostfixUpdate, const Expr *V, 5024 const Expr *X, const Expr *E, 5025 const Expr *UE, bool IsXLHSInRHSPart, 5026 SourceLocation Loc) { 5027 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 5028 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 5029 RValue NewVVal; 5030 LValue VLValue = CGF.EmitLValue(V); 5031 LValue XLValue = CGF.EmitLValue(X); 5032 RValue ExprRValue = CGF.EmitAnyExpr(E); 5033 QualType NewVValType; 5034 if (UE) { 5035 // 'x' is updated with some additional value. 5036 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 5037 "Update expr in 'atomic capture' must be a binary operator."); 5038 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 5039 // Update expressions are allowed to have the following forms: 5040 // x binop= expr; -> xrval + expr; 5041 // x++, ++x -> xrval + 1; 5042 // x--, --x -> xrval - 1; 5043 // x = x binop expr; -> xrval binop expr 5044 // x = expr Op x; - > expr binop xrval; 5045 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 5046 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 5047 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 5048 NewVValType = XRValExpr->getType(); 5049 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 5050 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 5051 IsPostfixUpdate](RValue XRValue) { 5052 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5053 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 5054 RValue Res = CGF.EmitAnyExpr(UE); 5055 NewVVal = IsPostfixUpdate ? XRValue : Res; 5056 return Res; 5057 }; 5058 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5059 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 5060 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5061 if (Res.first) { 5062 // 'atomicrmw' instruction was generated. 5063 if (IsPostfixUpdate) { 5064 // Use old value from 'atomicrmw'. 5065 NewVVal = Res.second; 5066 } else { 5067 // 'atomicrmw' does not provide new value, so evaluate it using old 5068 // value of 'x'. 
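// E.g. for 'v = ++x;' lowered to 'atomicrmw add', the instruction returns
// the value of 'x' before the increment, so the update expression is
// re-evaluated here with 'x' bound to that old value to produce the value
// captured into 'v'.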
5069 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 5070 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 5071 NewVVal = CGF.EmitAnyExpr(UE); 5072 } 5073 } 5074 } else { 5075 // 'x' is simply rewritten with some 'expr'. 5076 NewVValType = X->getType().getNonReferenceType(); 5077 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 5078 X->getType().getNonReferenceType(), Loc); 5079 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) { 5080 NewVVal = XRValue; 5081 return ExprRValue; 5082 }; 5083 // Try to perform atomicrmw xchg, otherwise simple exchange. 5084 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 5085 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 5086 Loc, Gen); 5087 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X); 5088 if (Res.first) { 5089 // 'atomicrmw' instruction was generated. 5090 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 5091 } 5092 } 5093 // Emit post-update store to 'v' of old/new 'x' value. 5094 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 5095 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V); 5096 // OpenMP, 2.17.7, atomic Construct 5097 // If the write, update, or capture clause is specified and the release, 5098 // acq_rel, or seq_cst clause is specified then the strong flush on entry to 5099 // the atomic operation is also a release flush. 5100 // If the read or capture clause is specified and the acquire, acq_rel, or 5101 // seq_cst clause is specified then the strong flush on exit from the atomic 5102 // operation is also an acquire flush. 5103 switch (AO) { 5104 case llvm::AtomicOrdering::Release: 5105 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5106 llvm::AtomicOrdering::Release); 5107 break; 5108 case llvm::AtomicOrdering::Acquire: 5109 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5110 llvm::AtomicOrdering::Acquire); 5111 break; 5112 case llvm::AtomicOrdering::AcquireRelease: 5113 case llvm::AtomicOrdering::SequentiallyConsistent: 5114 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc, 5115 llvm::AtomicOrdering::AcquireRelease); 5116 break; 5117 case llvm::AtomicOrdering::Monotonic: 5118 break; 5119 case llvm::AtomicOrdering::NotAtomic: 5120 case llvm::AtomicOrdering::Unordered: 5121 llvm_unreachable("Unexpected ordering."); 5122 } 5123 } 5124 5125 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 5126 llvm::AtomicOrdering AO, bool IsPostfixUpdate, 5127 const Expr *X, const Expr *V, const Expr *E, 5128 const Expr *UE, bool IsXLHSInRHSPart, 5129 SourceLocation Loc) { 5130 switch (Kind) { 5131 case OMPC_read: 5132 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc); 5133 break; 5134 case OMPC_write: 5135 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc); 5136 break; 5137 case OMPC_unknown: 5138 case OMPC_update: 5139 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc); 5140 break; 5141 case OMPC_capture: 5142 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE, 5143 IsXLHSInRHSPart, Loc); 5144 break; 5145 case OMPC_if: 5146 case OMPC_final: 5147 case OMPC_num_threads: 5148 case OMPC_private: 5149 case OMPC_firstprivate: 5150 case OMPC_lastprivate: 5151 case OMPC_reduction: 5152 case OMPC_task_reduction: 5153 case OMPC_in_reduction: 5154 case OMPC_safelen: 5155 case OMPC_simdlen: 5156 case OMPC_allocator: 5157 case OMPC_allocate: 5158 case OMPC_collapse: 5159 case OMPC_default: 5160 case OMPC_seq_cst: 5161 case OMPC_acq_rel: 5162 case OMPC_acquire: 5163 
case OMPC_release:
5164 case OMPC_relaxed:
5165 case OMPC_shared:
5166 case OMPC_linear:
5167 case OMPC_aligned:
5168 case OMPC_copyin:
5169 case OMPC_copyprivate:
5170 case OMPC_flush:
5171 case OMPC_depobj:
5172 case OMPC_proc_bind:
5173 case OMPC_schedule:
5174 case OMPC_ordered:
5175 case OMPC_nowait:
5176 case OMPC_untied:
5177 case OMPC_threadprivate:
5178 case OMPC_depend:
5179 case OMPC_mergeable:
5180 case OMPC_device:
5181 case OMPC_threads:
5182 case OMPC_simd:
5183 case OMPC_map:
5184 case OMPC_num_teams:
5185 case OMPC_thread_limit:
5186 case OMPC_priority:
5187 case OMPC_grainsize:
5188 case OMPC_nogroup:
5189 case OMPC_num_tasks:
5190 case OMPC_hint:
5191 case OMPC_dist_schedule:
5192 case OMPC_defaultmap:
5193 case OMPC_uniform:
5194 case OMPC_to:
5195 case OMPC_from:
5196 case OMPC_use_device_ptr:
5197 case OMPC_use_device_addr:
5198 case OMPC_is_device_ptr:
5199 case OMPC_unified_address:
5200 case OMPC_unified_shared_memory:
5201 case OMPC_reverse_offload:
5202 case OMPC_dynamic_allocators:
5203 case OMPC_atomic_default_mem_order:
5204 case OMPC_device_type:
5205 case OMPC_match:
5206 case OMPC_nontemporal:
5207 case OMPC_order:
5208 case OMPC_destroy:
5209 case OMPC_detach:
5210 case OMPC_inclusive:
5211 case OMPC_exclusive:
5212 case OMPC_uses_allocators:
5213 case OMPC_affinity:
5214 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
5215 }
5216 }
5217
5218 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
5219 llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
5220 bool MemOrderingSpecified = false;
5221 if (S.getSingleClause<OMPSeqCstClause>()) {
5222 AO = llvm::AtomicOrdering::SequentiallyConsistent;
5223 MemOrderingSpecified = true;
5224 } else if (S.getSingleClause<OMPAcqRelClause>()) {
5225 AO = llvm::AtomicOrdering::AcquireRelease;
5226 MemOrderingSpecified = true;
5227 } else if (S.getSingleClause<OMPAcquireClause>()) {
5228 AO = llvm::AtomicOrdering::Acquire;
5229 MemOrderingSpecified = true;
5230 } else if (S.getSingleClause<OMPReleaseClause>()) {
5231 AO = llvm::AtomicOrdering::Release;
5232 MemOrderingSpecified = true;
5233 } else if (S.getSingleClause<OMPRelaxedClause>()) {
5234 AO = llvm::AtomicOrdering::Monotonic;
5235 MemOrderingSpecified = true;
5236 }
5237 OpenMPClauseKind Kind = OMPC_unknown;
5238 for (const OMPClause *C : S.clauses()) {
5239 // Find first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause,
5240 // if it is first).
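// E.g. for '#pragma omp atomic capture seq_cst' this loop yields
// Kind == OMPC_capture, with AO already set to SequentiallyConsistent
// above.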
5241 if (C->getClauseKind() != OMPC_seq_cst &&
5242 C->getClauseKind() != OMPC_acq_rel &&
5243 C->getClauseKind() != OMPC_acquire &&
5244 C->getClauseKind() != OMPC_release &&
5245 C->getClauseKind() != OMPC_relaxed) {
5246 Kind = C->getClauseKind();
5247 break;
5248 }
5249 }
5250 if (!MemOrderingSpecified) {
5251 llvm::AtomicOrdering DefaultOrder =
5252 CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
5253 if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
5254 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
5255 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
5256 Kind == OMPC_capture)) {
5257 AO = DefaultOrder;
5258 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
5259 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
5260 AO = llvm::AtomicOrdering::Release;
5261 } else if (Kind == OMPC_read) {
5263 AO = llvm::AtomicOrdering::Acquire;
5264 }
5265 }
5266 }
5267
5268 const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
5269
5270 auto &&CodeGen = [&S, Kind, AO, CS](CodeGenFunction &CGF,
5271 PrePostActionTy &) {
5272 CGF.EmitStopPoint(CS);
5273 emitOMPAtomicExpr(CGF, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
5274 S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
5275 S.getBeginLoc());
5276 };
5277 OMPLexicalScope Scope(*this, S, OMPD_unknown);
5278 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
5279 }
5280
5281 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
5282 const OMPExecutableDirective &S,
5283 const RegionCodeGenTy &CodeGen) {
5284 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
5285 CodeGenModule &CGM = CGF.CGM;
5286
5287 // On device emit this construct as inlined code.
5288 if (CGM.getLangOpts().OpenMPIsDevice) {
5289 OMPLexicalScope Scope(CGF, S, OMPD_target);
5290 CGM.getOpenMPRuntime().emitInlinedDirective(
5291 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5292 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5293 });
5294 return;
5295 }
5296
5297 auto LPCRegion =
5298 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
5299 llvm::Function *Fn = nullptr;
5300 llvm::Constant *FnID = nullptr;
5301
5302 const Expr *IfCond = nullptr;
5303 // Check for the (at most one) if clause associated with the target region.
5304 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
5305 if (C->getNameModifier() == OMPD_unknown ||
5306 C->getNameModifier() == OMPD_target) {
5307 IfCond = C->getCondition();
5308 break;
5309 }
5310 }
5311
5312 // Check if we have any device clause associated with the directive.
5313 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
5314 nullptr, OMPC_DEVICE_unknown);
5315 if (auto *C = S.getSingleClause<OMPDeviceClause>())
5316 Device.setPointerAndInt(C->getDevice(), C->getModifier());
5317
5318 // Check if we have an if clause whose conditional always evaluates to false
5319 // or if we do not have any targets specified. If so the target region is not
5320 // an offload entry point.
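// E.g. compiling without any -fopenmp-targets triples, or writing
//   #pragma omp target if(0)
//   { ... }
// keeps the region executing on the host, and no offload entry is
// registered for it.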
5321 bool IsOffloadEntry = true; 5322 if (IfCond) { 5323 bool Val; 5324 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 5325 IsOffloadEntry = false; 5326 } 5327 if (CGM.getLangOpts().OMPTargetTriples.empty()) 5328 IsOffloadEntry = false; 5329 5330 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 5331 StringRef ParentName; 5332 // In case we have Ctors/Dtors we use the complete type variant to produce 5333 // the mangling of the device outlined kernel. 5334 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 5335 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 5336 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 5337 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 5338 else 5339 ParentName = 5340 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 5341 5342 // Emit target region as a standalone region. 5343 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 5344 IsOffloadEntry, CodeGen); 5345 OMPLexicalScope Scope(CGF, S, OMPD_task); 5346 auto &&SizeEmitter = 5347 [IsOffloadEntry](CodeGenFunction &CGF, 5348 const OMPLoopDirective &D) -> llvm::Value * { 5349 if (IsOffloadEntry) { 5350 OMPLoopScope(CGF, D); 5351 // Emit calculation of the iterations count. 5352 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations()); 5353 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty, 5354 /*isSigned=*/false); 5355 return NumIterations; 5356 } 5357 return nullptr; 5358 }; 5359 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 5360 SizeEmitter); 5361 } 5362 5363 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 5364 PrePostActionTy &Action) { 5365 Action.Enter(CGF); 5366 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5367 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5368 CGF.EmitOMPPrivateClause(S, PrivateScope); 5369 (void)PrivateScope.Privatize(); 5370 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5371 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5372 5373 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt()); 5374 } 5375 5376 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 5377 StringRef ParentName, 5378 const OMPTargetDirective &S) { 5379 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5380 emitTargetRegion(CGF, S, Action); 5381 }; 5382 llvm::Function *Fn; 5383 llvm::Constant *Addr; 5384 // Emit target region as a standalone region. 
5385 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5386 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5387 assert(Fn && Addr && "Target device function emission failed."); 5388 } 5389 5390 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 5391 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5392 emitTargetRegion(CGF, S, Action); 5393 }; 5394 emitCommonOMPTargetDirective(*this, S, CodeGen); 5395 } 5396 5397 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 5398 const OMPExecutableDirective &S, 5399 OpenMPDirectiveKind InnermostKind, 5400 const RegionCodeGenTy &CodeGen) { 5401 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 5402 llvm::Function *OutlinedFn = 5403 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 5404 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 5405 5406 const auto *NT = S.getSingleClause<OMPNumTeamsClause>(); 5407 const auto *TL = S.getSingleClause<OMPThreadLimitClause>(); 5408 if (NT || TL) { 5409 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr; 5410 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr; 5411 5412 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 5413 S.getBeginLoc()); 5414 } 5415 5416 OMPTeamsScope Scope(CGF, S); 5417 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 5418 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 5419 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn, 5420 CapturedVars); 5421 } 5422 5423 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 5424 // Emit teams region as a standalone region. 5425 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5426 Action.Enter(CGF); 5427 OMPPrivateScope PrivateScope(CGF); 5428 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5429 CGF.EmitOMPPrivateClause(S, PrivateScope); 5430 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5431 (void)PrivateScope.Privatize(); 5432 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt()); 5433 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5434 }; 5435 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5436 emitPostUpdateForReductionClause(*this, S, 5437 [](CodeGenFunction &) { return nullptr; }); 5438 } 5439 5440 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5441 const OMPTargetTeamsDirective &S) { 5442 auto *CS = S.getCapturedStmt(OMPD_teams); 5443 Action.Enter(CGF); 5444 // Emit teams region as a standalone region. 
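// (emitCommonOMPTeamsDirective outlines this region and, on the host,
// invokes it through the __kmpc_fork_teams runtime entry point.)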
5445 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 5446 Action.Enter(CGF); 5447 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5448 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 5449 CGF.EmitOMPPrivateClause(S, PrivateScope); 5450 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5451 (void)PrivateScope.Privatize(); 5452 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 5453 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 5454 CGF.EmitStmt(CS->getCapturedStmt()); 5455 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5456 }; 5457 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 5458 emitPostUpdateForReductionClause(CGF, S, 5459 [](CodeGenFunction &) { return nullptr; }); 5460 } 5461 5462 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 5463 CodeGenModule &CGM, StringRef ParentName, 5464 const OMPTargetTeamsDirective &S) { 5465 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5466 emitTargetTeamsRegion(CGF, Action, S); 5467 }; 5468 llvm::Function *Fn; 5469 llvm::Constant *Addr; 5470 // Emit target region as a standalone region. 5471 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5472 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5473 assert(Fn && Addr && "Target device function emission failed."); 5474 } 5475 5476 void CodeGenFunction::EmitOMPTargetTeamsDirective( 5477 const OMPTargetTeamsDirective &S) { 5478 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5479 emitTargetTeamsRegion(CGF, Action, S); 5480 }; 5481 emitCommonOMPTargetDirective(*this, S, CodeGen); 5482 } 5483 5484 static void 5485 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 5486 const OMPTargetTeamsDistributeDirective &S) { 5487 Action.Enter(CGF); 5488 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5489 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5490 }; 5491 5492 // Emit teams region as a standalone region. 5493 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5494 PrePostActionTy &Action) { 5495 Action.Enter(CGF); 5496 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5497 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5498 (void)PrivateScope.Privatize(); 5499 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5500 CodeGenDistribute); 5501 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5502 }; 5503 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen); 5504 emitPostUpdateForReductionClause(CGF, S, 5505 [](CodeGenFunction &) { return nullptr; }); 5506 } 5507 5508 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 5509 CodeGenModule &CGM, StringRef ParentName, 5510 const OMPTargetTeamsDistributeDirective &S) { 5511 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5512 emitTargetTeamsDistributeRegion(CGF, Action, S); 5513 }; 5514 llvm::Function *Fn; 5515 llvm::Constant *Addr; 5516 // Emit target region as a standalone region. 
5517 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5518 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5519 assert(Fn && Addr && "Target device function emission failed."); 5520 } 5521 5522 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 5523 const OMPTargetTeamsDistributeDirective &S) { 5524 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5525 emitTargetTeamsDistributeRegion(CGF, Action, S); 5526 }; 5527 emitCommonOMPTargetDirective(*this, S, CodeGen); 5528 } 5529 5530 static void emitTargetTeamsDistributeSimdRegion( 5531 CodeGenFunction &CGF, PrePostActionTy &Action, 5532 const OMPTargetTeamsDistributeSimdDirective &S) { 5533 Action.Enter(CGF); 5534 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5535 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5536 }; 5537 5538 // Emit teams region as a standalone region. 5539 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5540 PrePostActionTy &Action) { 5541 Action.Enter(CGF); 5542 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5543 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5544 (void)PrivateScope.Privatize(); 5545 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5546 CodeGenDistribute); 5547 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5548 }; 5549 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen); 5550 emitPostUpdateForReductionClause(CGF, S, 5551 [](CodeGenFunction &) { return nullptr; }); 5552 } 5553 5554 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 5555 CodeGenModule &CGM, StringRef ParentName, 5556 const OMPTargetTeamsDistributeSimdDirective &S) { 5557 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5558 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 5559 }; 5560 llvm::Function *Fn; 5561 llvm::Constant *Addr; 5562 // Emit target region as a standalone region. 5563 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5564 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5565 assert(Fn && Addr && "Target device function emission failed."); 5566 } 5567 5568 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 5569 const OMPTargetTeamsDistributeSimdDirective &S) { 5570 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5571 emitTargetTeamsDistributeSimdRegion(CGF, Action, S); 5572 }; 5573 emitCommonOMPTargetDirective(*this, S, CodeGen); 5574 } 5575 5576 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 5577 const OMPTeamsDistributeDirective &S) { 5578 5579 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5580 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5581 }; 5582 5583 // Emit teams region as a standalone region. 
5584 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5585 PrePostActionTy &Action) { 5586 Action.Enter(CGF); 5587 OMPPrivateScope PrivateScope(CGF); 5588 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5589 (void)PrivateScope.Privatize(); 5590 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5591 CodeGenDistribute); 5592 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5593 }; 5594 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen); 5595 emitPostUpdateForReductionClause(*this, S, 5596 [](CodeGenFunction &) { return nullptr; }); 5597 } 5598 5599 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 5600 const OMPTeamsDistributeSimdDirective &S) { 5601 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5602 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 5603 }; 5604 5605 // Emit teams region as a standalone region. 5606 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5607 PrePostActionTy &Action) { 5608 Action.Enter(CGF); 5609 OMPPrivateScope PrivateScope(CGF); 5610 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5611 (void)PrivateScope.Privatize(); 5612 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, 5613 CodeGenDistribute); 5614 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5615 }; 5616 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen); 5617 emitPostUpdateForReductionClause(*this, S, 5618 [](CodeGenFunction &) { return nullptr; }); 5619 } 5620 5621 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 5622 const OMPTeamsDistributeParallelForDirective &S) { 5623 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5624 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5625 S.getDistInc()); 5626 }; 5627 5628 // Emit teams region as a standalone region. 5629 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5630 PrePostActionTy &Action) { 5631 Action.Enter(CGF); 5632 OMPPrivateScope PrivateScope(CGF); 5633 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5634 (void)PrivateScope.Privatize(); 5635 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, 5636 CodeGenDistribute); 5637 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5638 }; 5639 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen); 5640 emitPostUpdateForReductionClause(*this, S, 5641 [](CodeGenFunction &) { return nullptr; }); 5642 } 5643 5644 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 5645 const OMPTeamsDistributeParallelForSimdDirective &S) { 5646 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5647 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5648 S.getDistInc()); 5649 }; 5650 5651 // Emit teams region as a standalone region. 
5652 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5653 PrePostActionTy &Action) { 5654 Action.Enter(CGF); 5655 OMPPrivateScope PrivateScope(CGF); 5656 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5657 (void)PrivateScope.Privatize(); 5658 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5659 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5660 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5661 }; 5662 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd, 5663 CodeGen); 5664 emitPostUpdateForReductionClause(*this, S, 5665 [](CodeGenFunction &) { return nullptr; }); 5666 } 5667 5668 static void emitTargetTeamsDistributeParallelForRegion( 5669 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S, 5670 PrePostActionTy &Action) { 5671 Action.Enter(CGF); 5672 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5673 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5674 S.getDistInc()); 5675 }; 5676 5677 // Emit teams region as a standalone region. 5678 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5679 PrePostActionTy &Action) { 5680 Action.Enter(CGF); 5681 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5682 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5683 (void)PrivateScope.Privatize(); 5684 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5685 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5686 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5687 }; 5688 5689 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for, 5690 CodeGenTeams); 5691 emitPostUpdateForReductionClause(CGF, S, 5692 [](CodeGenFunction &) { return nullptr; }); 5693 } 5694 5695 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 5696 CodeGenModule &CGM, StringRef ParentName, 5697 const OMPTargetTeamsDistributeParallelForDirective &S) { 5698 // Emit SPMD target teams distribute parallel for region as a standalone 5699 // region. 5700 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5701 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5702 }; 5703 llvm::Function *Fn; 5704 llvm::Constant *Addr; 5705 // Emit target region as a standalone region. 5706 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5707 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5708 assert(Fn && Addr && "Target device function emission failed."); 5709 } 5710 5711 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 5712 const OMPTargetTeamsDistributeParallelForDirective &S) { 5713 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5714 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action); 5715 }; 5716 emitCommonOMPTargetDirective(*this, S, CodeGen); 5717 } 5718 5719 static void emitTargetTeamsDistributeParallelForSimdRegion( 5720 CodeGenFunction &CGF, 5721 const OMPTargetTeamsDistributeParallelForSimdDirective &S, 5722 PrePostActionTy &Action) { 5723 Action.Enter(CGF); 5724 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 5725 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 5726 S.getDistInc()); 5727 }; 5728 5729 // Emit teams region as a standalone region. 
5730 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF, 5731 PrePostActionTy &Action) { 5732 Action.Enter(CGF); 5733 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 5734 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 5735 (void)PrivateScope.Privatize(); 5736 CGF.CGM.getOpenMPRuntime().emitInlinedDirective( 5737 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false); 5738 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 5739 }; 5740 5741 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd, 5742 CodeGenTeams); 5743 emitPostUpdateForReductionClause(CGF, S, 5744 [](CodeGenFunction &) { return nullptr; }); 5745 } 5746 5747 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 5748 CodeGenModule &CGM, StringRef ParentName, 5749 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5750 // Emit SPMD target teams distribute parallel for simd region as a standalone 5751 // region. 5752 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5753 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5754 }; 5755 llvm::Function *Fn; 5756 llvm::Constant *Addr; 5757 // Emit target region as a standalone region. 5758 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 5759 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 5760 assert(Fn && Addr && "Target device function emission failed."); 5761 } 5762 5763 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 5764 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 5765 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 5766 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action); 5767 }; 5768 emitCommonOMPTargetDirective(*this, S, CodeGen); 5769 } 5770 5771 void CodeGenFunction::EmitOMPCancellationPointDirective( 5772 const OMPCancellationPointDirective &S) { 5773 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(), 5774 S.getCancelRegion()); 5775 } 5776 5777 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 5778 const Expr *IfCond = nullptr; 5779 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 5780 if (C->getNameModifier() == OMPD_unknown || 5781 C->getNameModifier() == OMPD_cancel) { 5782 IfCond = C->getCondition(); 5783 break; 5784 } 5785 } 5786 if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder()) { 5787 // TODO: This check is necessary as we only generate `omp parallel` through 5788 // the OpenMPIRBuilder for now. 
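    // When cancelling an enclosing parallel region, the OMPIRBuilder lowers
    // the construct directly (honoring the optional 'if' condition); all
    // other cancel regions fall through to the CGOpenMPRuntime call below.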
5789     if (S.getCancelRegion() == OMPD_parallel) {
5790       llvm::Value *IfCondition = nullptr;
5791       if (IfCond)
5792         IfCondition = EmitScalarExpr(IfCond,
5793                                      /*IgnoreResultAssign=*/true);
5794       return Builder.restoreIP(
5795           OMPBuilder->CreateCancel(Builder, IfCondition, S.getCancelRegion()));
5796     }
5797   }
5798 
5799   CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
5800                                         S.getCancelRegion());
5801 }
5802 
5803 CodeGenFunction::JumpDest
5804 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
5805   if (Kind == OMPD_parallel || Kind == OMPD_task ||
5806       Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
5807       Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
5808     return ReturnBlock;
5809   assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
5810          Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
5811          Kind == OMPD_distribute_parallel_for ||
5812          Kind == OMPD_target_parallel_for ||
5813          Kind == OMPD_teams_distribute_parallel_for ||
5814          Kind == OMPD_target_teams_distribute_parallel_for);
5815   return OMPCancelStack.getExitBlock();
5816 }
5817 
5818 void CodeGenFunction::EmitOMPUseDevicePtrClause(
5819     const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
5820     const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5821   auto OrigVarIt = C.varlist_begin();
5822   auto InitIt = C.inits().begin();
5823   for (const Expr *PvtVarIt : C.private_copies()) {
5824     const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
5825     const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
5826     const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
5827 
5828     // In order to identify the right initializer we need to match the
5829     // declaration used by the mapping logic. In some cases we may get
5830     // an OMPCapturedExprDecl that refers to the original declaration.
5831     const ValueDecl *MatchingVD = OrigVD;
5832     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5833       // OMPCapturedExprDecls are used to privatize fields of the current
5834       // structure.
5835       const auto *ME = cast<MemberExpr>(OED->getInit());
5836       assert(isa<CXXThisExpr>(ME->getBase()) &&
5837              "Base should be the current struct!");
5838       MatchingVD = ME->getMemberDecl();
5839     }
5840 
5841     // If we don't have information about the current list item, move on to
5842     // the next one.
5843     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5844     if (InitAddrIt == CaptureDeviceAddrMap.end())
5845       continue;
5846 
5847     bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
5848                                                          InitAddrIt, InitVD,
5849                                                          PvtVD]() {
5850       // Initialize the temporary initialization variable with the address we
5851       // get from the runtime library. We have to cast the source address
5852       // because it is always a void *. References are materialized in the
5853       // privatization scope, so the initialization here disregards the fact
5854       // that the original variable is a reference.
5855       QualType AddrQTy =
5856           getContext().getPointerType(OrigVD->getType().getNonReferenceType());
5857       llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
5858       Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
5859       setAddrOfLocalVar(InitVD, InitAddr);
5860 
5861       // Emit the private declaration; it will be initialized by the init
5862       // declaration we just added to the local declarations map.
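      // (EmitDecl picks up the address registered for InitVD in LocalDeclMap
      // while emitting the initializer of PvtVD; the entry is erased again
      // right below.)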
5863       EmitDecl(*PvtVD);
5864 
5865       // The initialization variable has served its purpose in the emission
5866       // of the previous declaration, so we don't need it anymore.
5867       LocalDeclMap.erase(InitVD);
5868 
5869       // Return the address of the private variable.
5870       return GetAddrOfLocalVar(PvtVD);
5871     });
5872     assert(IsRegistered && "firstprivate var already registered as private");
5873     // Silence the warning about unused variable.
5874     (void)IsRegistered;
5875 
5876     ++OrigVarIt;
5877     ++InitIt;
5878   }
5879 }
5880 
5881 static const VarDecl *getBaseDecl(const Expr *Ref) {
5882   const Expr *Base = Ref->IgnoreParenImpCasts();
5883   while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
5884     Base = OASE->getBase()->IgnoreParenImpCasts();
5885   while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
5886     Base = ASE->getBase()->IgnoreParenImpCasts();
5887   return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
5888 }
5889 
5890 void CodeGenFunction::EmitOMPUseDeviceAddrClause(
5891     const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
5892     const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
5893   llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
5894   for (const Expr *Ref : C.varlists()) {
5895     const VarDecl *OrigVD = getBaseDecl(Ref);
5896     if (!Processed.insert(OrigVD).second)
5897       continue;
5898     // In order to identify the right initializer we need to match the
5899     // declaration used by the mapping logic. In some cases we may get
5900     // an OMPCapturedExprDecl that refers to the original declaration.
5901     const ValueDecl *MatchingVD = OrigVD;
5902     if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
5903       // OMPCapturedExprDecls are used to privatize fields of the current
5904       // structure.
5905       const auto *ME = cast<MemberExpr>(OED->getInit());
5906       assert(isa<CXXThisExpr>(ME->getBase()) &&
5907              "Base should be the current struct!");
5908       MatchingVD = ME->getMemberDecl();
5909     }
5910 
5911     // If we don't have information about the current list item, move on to
5912     // the next one.
5913     auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
5914     if (InitAddrIt == CaptureDeviceAddrMap.end())
5915       continue;
5916 
5917     Address PrivAddr = InitAddrIt->getSecond();
5918     // For declrefs and variable-length arrays we need to load the pointer for
5919     // correct mapping, since the pointer to the data was passed to the runtime.
5920     if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
5921         MatchingVD->getType()->isArrayType())
5922       PrivAddr =
5923           EmitLoadOfPointer(PrivAddr, getContext()
5924                                           .getPointerType(OrigVD->getType())
5925                                           ->castAs<PointerType>());
5926     llvm::Type *RealTy =
5927         ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
5928             ->getPointerTo();
5929     PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
5930 
5931     (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
5932   }
5933 }
5934 
5935 // Generate the instructions for '#pragma omp target data' directive.
5936 void CodeGenFunction::EmitOMPTargetDataDirective(
5937     const OMPTargetDataDirective &S) {
5938   CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
5939 
5940   // Create a pre/post action to signal the privatization of the device pointer.
5941   // This action can be replaced by the OpenMP runtime code generation to
5942   // deactivate privatization.
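  // In rough pseudocode (a sketch, mirroring the lambdas below):
  //   PrivatizeDevicePointers = false;
  //   Action.Enter(CGF);          // The runtime may set the flag to true.
  //   if (PrivatizeDevicePointers)
  //     privatize use_device_ptr/use_device_addr vars, then emit the body;
  //   else
  //     emit the body without privatization;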
5943   bool PrivatizeDevicePointers = false;
5944   class DevicePointerPrivActionTy : public PrePostActionTy {
5945     bool &PrivatizeDevicePointers;
5946 
5947   public:
5948     explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
5949         : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
5950     void Enter(CodeGenFunction &CGF) override {
5951       PrivatizeDevicePointers = true;
5952     }
5953   };
5954   DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
5955 
5956   auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
5957                        CodeGenFunction &CGF, PrePostActionTy &Action) {
5958     auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5959       CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5960     };
5961 
5962     // Codegen that selects whether to generate the privatization code or not.
5963     auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
5964                           &InnermostCodeGen](CodeGenFunction &CGF,
5965                                              PrePostActionTy &Action) {
5966       RegionCodeGenTy RCG(InnermostCodeGen);
5967       PrivatizeDevicePointers = false;
5968 
5969       // Call the pre-action to change the status of PrivatizeDevicePointers
5970       // if needed.
5971       Action.Enter(CGF);
5972 
5973       if (PrivatizeDevicePointers) {
5974         OMPPrivateScope PrivateScope(CGF);
5975         // Emit all instances of the use_device_ptr/use_device_addr clauses.
5976         for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
5977           CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
5978                                         Info.CaptureDeviceAddrMap);
5979         for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
5980           CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
5981                                          Info.CaptureDeviceAddrMap);
5982         (void)PrivateScope.Privatize();
5983         RCG(CGF);
5984       } else {
5985         RCG(CGF);
5986       }
5987     };
5988 
5989     // Forward the provided action to the privatization codegen.
5990     RegionCodeGenTy PrivRCG(PrivCodeGen);
5991     PrivRCG.setAction(Action);
5992 
5993     // Although the body of the region is emitted as an inlined directive, we
5994     // don't use an inline scope, because changes to the references inside the
5995     // region are expected to be visible outside, so we do not privatize them.
5996     OMPLexicalScope Scope(CGF, S);
5997     CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
5998                                                     PrivRCG);
5999   };
6000 
6001   RegionCodeGenTy RCG(CodeGen);
6002 
6003   // If we don't have target devices, don't bother emitting the data mapping
6004   // code.
6005   if (CGM.getLangOpts().OMPTargetTriples.empty()) {
6006     RCG(*this);
6007     return;
6008   }
6009 
6010   // Check if we have any if clause associated with the directive.
6011   const Expr *IfCond = nullptr;
6012   if (const auto *C = S.getSingleClause<OMPIfClause>())
6013     IfCond = C->getCondition();
6014 
6015   // Check if we have any device clause associated with the directive.
6016   const Expr *Device = nullptr;
6017   if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6018     Device = C->getDevice();
6019 
6020   // Set the action to signal privatization of device pointers.
6021   RCG.setAction(PrivAction);
6022 
6023   // Emit region code.
6024   CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
6025                                              Info);
6026 }
6027 
6028 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
6029     const OMPTargetEnterDataDirective &S) {
6030   // If we don't have target devices, don't bother emitting the data mapping
6031   // code.
6032   if (CGM.getLangOpts().OMPTargetTriples.empty())
6033     return;
6034 
6035   // Check if we have any if clause associated with the directive.
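  // Both the 'if' and the 'device' clause operands, when present, are simply
  // forwarded to the runtime call emitted at the end of this function.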
6036 const Expr *IfCond = nullptr; 6037 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6038 IfCond = C->getCondition(); 6039 6040 // Check if we have any device clause associated with the directive. 6041 const Expr *Device = nullptr; 6042 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6043 Device = C->getDevice(); 6044 6045 OMPLexicalScope Scope(*this, S, OMPD_task); 6046 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6047 } 6048 6049 void CodeGenFunction::EmitOMPTargetExitDataDirective( 6050 const OMPTargetExitDataDirective &S) { 6051 // If we don't have target devices, don't bother emitting the data mapping 6052 // code. 6053 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6054 return; 6055 6056 // Check if we have any if clause associated with the directive. 6057 const Expr *IfCond = nullptr; 6058 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6059 IfCond = C->getCondition(); 6060 6061 // Check if we have any device clause associated with the directive. 6062 const Expr *Device = nullptr; 6063 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6064 Device = C->getDevice(); 6065 6066 OMPLexicalScope Scope(*this, S, OMPD_task); 6067 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6068 } 6069 6070 static void emitTargetParallelRegion(CodeGenFunction &CGF, 6071 const OMPTargetParallelDirective &S, 6072 PrePostActionTy &Action) { 6073 // Get the captured statement associated with the 'parallel' region. 6074 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel); 6075 Action.Enter(CGF); 6076 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) { 6077 Action.Enter(CGF); 6078 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 6079 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 6080 CGF.EmitOMPPrivateClause(S, PrivateScope); 6081 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 6082 (void)PrivateScope.Privatize(); 6083 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind())) 6084 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S); 6085 // TODO: Add support for clauses. 6086 CGF.EmitStmt(CS->getCapturedStmt()); 6087 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 6088 }; 6089 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 6090 emitEmptyBoundParameters); 6091 emitPostUpdateForReductionClause(CGF, S, 6092 [](CodeGenFunction &) { return nullptr; }); 6093 } 6094 6095 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 6096 CodeGenModule &CGM, StringRef ParentName, 6097 const OMPTargetParallelDirective &S) { 6098 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6099 emitTargetParallelRegion(CGF, S, Action); 6100 }; 6101 llvm::Function *Fn; 6102 llvm::Constant *Addr; 6103 // Emit target region as a standalone region. 
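  // emitTargetOutlinedFunction fills Fn and Addr with the outlined device
  // function and its offload entry address; the assert below checks that
  // emission actually produced both.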
6104   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6105       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6106   assert(Fn && Addr && "Target device function emission failed.");
6107 }
6108 
6109 void CodeGenFunction::EmitOMPTargetParallelDirective(
6110     const OMPTargetParallelDirective &S) {
6111   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6112     emitTargetParallelRegion(CGF, S, Action);
6113   };
6114   emitCommonOMPTargetDirective(*this, S, CodeGen);
6115 }
6116 
6117 static void emitTargetParallelForRegion(CodeGenFunction &CGF,
6118                                         const OMPTargetParallelForDirective &S,
6119                                         PrePostActionTy &Action) {
6120   Action.Enter(CGF);
6121   // Emit the directive as a combined directive consisting of two implicit
6122   // directives: 'parallel' with a nested 'for' directive.
6123   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6124     Action.Enter(CGF);
6125     CodeGenFunction::OMPCancelStackRAII CancelRegion(
6126         CGF, OMPD_target_parallel_for, S.hasCancel());
6127     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
6128                                emitDispatchForLoopBounds);
6129   };
6130   emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
6131                                  emitEmptyBoundParameters);
6132 }
6133 
6134 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
6135     CodeGenModule &CGM, StringRef ParentName,
6136     const OMPTargetParallelForDirective &S) {
6137   // Emit SPMD target parallel for region as a standalone region.
6138   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6139     emitTargetParallelForRegion(CGF, S, Action);
6140   };
6141   llvm::Function *Fn;
6142   llvm::Constant *Addr;
6143   // Emit target region as a standalone region.
6144   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6145       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6146   assert(Fn && Addr && "Target device function emission failed.");
6147 }
6148 
6149 void CodeGenFunction::EmitOMPTargetParallelForDirective(
6150     const OMPTargetParallelForDirective &S) {
6151   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6152     emitTargetParallelForRegion(CGF, S, Action);
6153   };
6154   emitCommonOMPTargetDirective(*this, S, CodeGen);
6155 }
6156 
6157 static void
6158 emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
6159                                 const OMPTargetParallelForSimdDirective &S,
6160                                 PrePostActionTy &Action) {
6161   Action.Enter(CGF);
6162   // Emit the directive as a combined directive consisting of two implicit
6163   // directives: 'parallel' with a nested 'for simd' directive.
6164   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6165     Action.Enter(CGF);
6166     CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
6167                                emitDispatchForLoopBounds);
6168   };
6169   emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
6170                                  emitEmptyBoundParameters);
6171 }
6172 
6173 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
6174     CodeGenModule &CGM, StringRef ParentName,
6175     const OMPTargetParallelForSimdDirective &S) {
6176   // Emit SPMD target parallel for simd region as a standalone region.
6177   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6178     emitTargetParallelForSimdRegion(CGF, S, Action);
6179   };
6180   llvm::Function *Fn;
6181   llvm::Constant *Addr;
6182   // Emit target region as a standalone region.
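  // A purely illustrative sketch (not from the source) of a construct handled
  // here:
  //   #pragma omp target parallel for simd
  //   for (int I = 0; I < N; ++I)
  //     A[I] *= 2;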
6183   CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6184       S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6185   assert(Fn && Addr && "Target device function emission failed.");
6186 }
6187 
6188 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
6189     const OMPTargetParallelForSimdDirective &S) {
6190   auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6191     emitTargetParallelForSimdRegion(CGF, S, Action);
6192   };
6193   emitCommonOMPTargetDirective(*this, S, CodeGen);
6194 }
6195 
6196 /// Emit a helper variable and return corresponding lvalue.
6197 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
6198                      const ImplicitParamDecl *PVD,
6199                      CodeGenFunction::OMPPrivateScope &Privates) {
6200   const auto *VDecl = cast<VarDecl>(Helper->getDecl());
6201   Privates.addPrivate(VDecl,
6202                       [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
6203 }
6204 
6205 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
6206   assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
6207   // Emit outlined function for task construct.
6208   const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
6209   Address CapturedStruct = Address::invalid();
6210   {
6211     OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
6212     CapturedStruct = GenerateCapturedStmtArgument(*CS);
6213   }
6214   QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
6215   const Expr *IfCond = nullptr;
6216   for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
6217     if (C->getNameModifier() == OMPD_unknown ||
6218         C->getNameModifier() == OMPD_taskloop) {
6219       IfCond = C->getCondition();
6220       break;
6221     }
6222   }
6223 
6224   OMPTaskDataTy Data;
6225   // Check if taskloop must be emitted without taskgroup.
6226   Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
6227   // TODO: Check if we should emit tied or untied task.
6228   Data.Tied = true;
6229   // Set scheduling for taskloop.
6230   if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
6231     // grainsize clause
6232     Data.Schedule.setInt(/*IntVal=*/false);
6233     Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
6234   } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
6235     // num_tasks clause
6236     Data.Schedule.setInt(/*IntVal=*/true);
6237     Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
6238   }
6239 
6240   auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
6241     // if (PreCond) {
6242     //   for (IV in 0..LastIteration) BODY;
6243     //   <Final counter/linear vars updates>;
6244     // }
6245     //
6246 
6247     // Emit: if (PreCond) - begin.
6248     // If the condition constant folds and can be elided, avoid emitting the
6249     // whole loop.
6250     bool CondConstant;
6251     llvm::BasicBlock *ContBlock = nullptr;
6252     OMPLoopScope PreInitScope(CGF, S);
6253     if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
6254       if (!CondConstant)
6255         return;
6256     } else {
6257       llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
6258       ContBlock = CGF.createBasicBlock("taskloop.if.end");
6259       emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
6260                   CGF.getProfileCount(&S));
6261       CGF.EmitBlock(ThenBlock);
6262       CGF.incrementProfileCounter(&S);
6263     }
6264 
6265     (void)CGF.EmitOMPLinearClauseInit(S);
6266 
6267     OMPPrivateScope LoopScope(CGF);
6268     // Emit helper vars inits.
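    // The outlined taskloop function receives the lower bound, upper bound,
    // stride and last-iteration flag as trailing parameters of the captured
    // declaration; the enum below records their positions (the lower bound is
    // parameter 5).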
6269 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 6270 auto *I = CS->getCapturedDecl()->param_begin(); 6271 auto *LBP = std::next(I, LowerBound); 6272 auto *UBP = std::next(I, UpperBound); 6273 auto *STP = std::next(I, Stride); 6274 auto *LIP = std::next(I, LastIter); 6275 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 6276 LoopScope); 6277 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 6278 LoopScope); 6279 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 6280 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 6281 LoopScope); 6282 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 6283 CGF.EmitOMPLinearClause(S, LoopScope); 6284 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 6285 (void)LoopScope.Privatize(); 6286 // Emit the loop iteration variable. 6287 const Expr *IVExpr = S.getIterationVariable(); 6288 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 6289 CGF.EmitVarDecl(*IVDecl); 6290 CGF.EmitIgnoredExpr(S.getInit()); 6291 6292 // Emit the iterations count variable. 6293 // If it is not a variable, Sema decided to calculate iterations count on 6294 // each iteration (e.g., it is foldable into a constant). 6295 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 6296 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 6297 // Emit calculation of the iterations count. 6298 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 6299 } 6300 6301 { 6302 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false); 6303 emitCommonSimdLoop( 6304 CGF, S, 6305 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 6306 if (isOpenMPSimdDirective(S.getDirectiveKind())) 6307 CGF.EmitOMPSimdInit(S); 6308 }, 6309 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) { 6310 CGF.EmitOMPInnerLoop( 6311 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 6312 [&S](CodeGenFunction &CGF) { 6313 emitOMPLoopBodyWithStopPoint(CGF, S, 6314 CodeGenFunction::JumpDest()); 6315 }, 6316 [](CodeGenFunction &) {}); 6317 }); 6318 } 6319 // Emit: if (PreCond) - end. 6320 if (ContBlock) { 6321 CGF.EmitBranch(ContBlock); 6322 CGF.EmitBlock(ContBlock, true); 6323 } 6324 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
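    // (IsLastIter is the flag mapped from the last-iteration parameter above;
    // it is loaded and compared against zero to guard the final copies.)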
6325 if (HasLastprivateClause) { 6326 CGF.EmitOMPLastprivateClauseFinal( 6327 S, isOpenMPSimdDirective(S.getDirectiveKind()), 6328 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 6329 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6330 (*LIP)->getType(), S.getBeginLoc()))); 6331 } 6332 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) { 6333 return CGF.Builder.CreateIsNotNull( 6334 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 6335 (*LIP)->getType(), S.getBeginLoc())); 6336 }); 6337 }; 6338 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 6339 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn, 6340 const OMPTaskDataTy &Data) { 6341 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond, 6342 &Data](CodeGenFunction &CGF, PrePostActionTy &) { 6343 OMPLoopScope PreInitScope(CGF, S); 6344 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S, 6345 OutlinedFn, SharedsTy, 6346 CapturedStruct, IfCond, Data); 6347 }; 6348 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 6349 CodeGen); 6350 }; 6351 if (Data.Nogroup) { 6352 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data); 6353 } else { 6354 CGM.getOpenMPRuntime().emitTaskgroupRegion( 6355 *this, 6356 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 6357 PrePostActionTy &Action) { 6358 Action.Enter(CGF); 6359 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, 6360 Data); 6361 }, 6362 S.getBeginLoc()); 6363 } 6364 } 6365 6366 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 6367 auto LPCRegion = 6368 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6369 EmitOMPTaskLoopBasedDirective(S); 6370 } 6371 6372 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 6373 const OMPTaskLoopSimdDirective &S) { 6374 auto LPCRegion = 6375 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6376 OMPLexicalScope Scope(*this, S); 6377 EmitOMPTaskLoopBasedDirective(S); 6378 } 6379 6380 void CodeGenFunction::EmitOMPMasterTaskLoopDirective( 6381 const OMPMasterTaskLoopDirective &S) { 6382 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6383 Action.Enter(CGF); 6384 EmitOMPTaskLoopBasedDirective(S); 6385 }; 6386 auto LPCRegion = 6387 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6388 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false); 6389 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6390 } 6391 6392 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective( 6393 const OMPMasterTaskLoopSimdDirective &S) { 6394 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6395 Action.Enter(CGF); 6396 EmitOMPTaskLoopBasedDirective(S); 6397 }; 6398 auto LPCRegion = 6399 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6400 OMPLexicalScope Scope(*this, S); 6401 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc()); 6402 } 6403 6404 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective( 6405 const OMPParallelMasterTaskLoopDirective &S) { 6406 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6407 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6408 PrePostActionTy &Action) { 6409 Action.Enter(CGF); 6410 CGF.EmitOMPTaskLoopBasedDirective(S); 6411 }; 6412 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6413 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6414 S.getBeginLoc()); 
6415 }; 6416 auto LPCRegion = 6417 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6418 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen, 6419 emitEmptyBoundParameters); 6420 } 6421 6422 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective( 6423 const OMPParallelMasterTaskLoopSimdDirective &S) { 6424 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) { 6425 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF, 6426 PrePostActionTy &Action) { 6427 Action.Enter(CGF); 6428 CGF.EmitOMPTaskLoopBasedDirective(S); 6429 }; 6430 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false); 6431 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen, 6432 S.getBeginLoc()); 6433 }; 6434 auto LPCRegion = 6435 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S); 6436 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen, 6437 emitEmptyBoundParameters); 6438 } 6439 6440 // Generate the instructions for '#pragma omp target update' directive. 6441 void CodeGenFunction::EmitOMPTargetUpdateDirective( 6442 const OMPTargetUpdateDirective &S) { 6443 // If we don't have target devices, don't bother emitting the data mapping 6444 // code. 6445 if (CGM.getLangOpts().OMPTargetTriples.empty()) 6446 return; 6447 6448 // Check if we have any if clause associated with the directive. 6449 const Expr *IfCond = nullptr; 6450 if (const auto *C = S.getSingleClause<OMPIfClause>()) 6451 IfCond = C->getCondition(); 6452 6453 // Check if we have any device clause associated with the directive. 6454 const Expr *Device = nullptr; 6455 if (const auto *C = S.getSingleClause<OMPDeviceClause>()) 6456 Device = C->getDevice(); 6457 6458 OMPLexicalScope Scope(*this, S, OMPD_task); 6459 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 6460 } 6461 6462 void CodeGenFunction::EmitSimpleOMPExecutableDirective( 6463 const OMPExecutableDirective &D) { 6464 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) { 6465 EmitOMPScanDirective(*SD); 6466 return; 6467 } 6468 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt()) 6469 return; 6470 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) { 6471 OMPPrivateScope GlobalsScope(CGF); 6472 if (isOpenMPTaskingDirective(D.getDirectiveKind())) { 6473 // Capture global firstprivates to avoid crash. 
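      // The body of a tasking directive is outlined, so a global referenced in
      // a firstprivate clause gets its address registered in the scope below,
      // keeping later emission of the outlined body from tripping over a
      // missing local mapping.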
6474       for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
6475         for (const Expr *Ref : C->varlists()) {
6476           const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
6477           if (!DRE)
6478             continue;
6479           const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
6480           if (!VD || VD->hasLocalStorage())
6481             continue;
6482           if (!CGF.LocalDeclMap.count(VD)) {
6483             LValue GlobLVal = CGF.EmitLValue(Ref);
6484             GlobalsScope.addPrivate(
6485                 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
6486           }
6487         }
6488       }
6489     }
6490     if (isOpenMPSimdDirective(D.getDirectiveKind())) {
6491       (void)GlobalsScope.Privatize();
6492       ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
6493       emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
6494     } else {
6495       if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
6496         for (const Expr *E : LD->counters()) {
6497           const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
6498           if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
6499             LValue GlobLVal = CGF.EmitLValue(E);
6500             GlobalsScope.addPrivate(
6501                 VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
6502           }
6503           if (isa<OMPCapturedExprDecl>(VD)) {
6504             // Emit only those that were not explicitly referenced in clauses.
6505             if (!CGF.LocalDeclMap.count(VD))
6506               CGF.EmitVarDecl(*VD);
6507           }
6508         }
6509         for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
6510           if (!C->getNumForLoops())
6511             continue;
6512           for (unsigned I = LD->getCollapsedNumber(),
6513                         E = C->getLoopNumIterations().size();
6514                I < E; ++I) {
6515             if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
6516                     cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
6517               // Emit only those that were not explicitly referenced in clauses.
6518               if (!CGF.LocalDeclMap.count(VD))
6519                 CGF.EmitVarDecl(*VD);
6520             }
6521           }
6522         }
6523       }
6524       (void)GlobalsScope.Privatize();
6525       CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
6526     }
6527   };
6528   {
6529     auto LPCRegion =
6530         CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
6531     OMPSimdLexicalScope Scope(*this, D);
6532     CGM.getOpenMPRuntime().emitInlinedDirective(
6533         *this,
6534         isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
6535                                                     : D.getDirectiveKind(),
6536         CodeGen);
6537   }
6538   // Check for outer lastprivate conditional update.
6539   checkForLastprivateConditionalUpdate(*this, D);
6540 }
6541 