//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/IR/CallSite.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (auto *PreInit = cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>())
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl));
  }

public:
  OMPLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S,
                  bool AsInlined = false, bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (AsInlined) {
      if (S.hasAssociatedStmt()) {
        auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
        for (auto &C : CS->captures()) {
          if (C.capturesVariable() || C.capturesVariableByCopy()) {
            auto *VD = C.getCapturedVar();
            assert(VD == VD->getCanonicalDecl() &&
                   "Canonical decl must be captured.");
            DeclRefExpr DRE(const_cast<VarDecl *>(VD),
                            isCapturedVar(CGF, VD) ||
                                (CGF.CapturedStmtInfo &&
                                 InlinedShareds.isGlobalVarCaptured(VD)),
                            VD->getType().getNonReferenceType(), VK_LValue,
                            SourceLocation());
            InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
              return CGF.EmitLValue(&DRE).getAddress();
            });
          }
        }
        (void)InlinedShareds.Privatize();
      }
    }
  }
};

/// Lexical scope for OpenMP parallel construct that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S,
                        /*AsInlined=*/false,
                        /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S,
                        /*AsInlined=*/false,
                        /*EmitPreInitStmt=*/EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of expressions used in the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      if (auto *PreInits = cast_or_null<DeclStmt>(LD->getPreInits())) {
        for (const auto *I : PreInits->decls())
          CGF.EmitVarDecl(cast<VarDecl>(*I));
      }
    }
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};

} // namespace

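/// Emits an lvalue for an expression that refers to a variable shared with
/// the enclosing context. DeclRefExprs are re-emitted with the
/// "refers to enclosing variable or capture" flag set when the variable is
/// captured by an enclosing lambda, captured statement, or block, so that the
/// captured copy is used rather than the original storage.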
LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  auto &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (auto *VAT = C.getAsVariableArrayType(Ty)) {
      llvm::Value *ArraySize;
      std::tie(ArraySize, Ty) = getVLASize(VAT);
      Size = Size ? Builder.CreateNUWMul(Size, ArraySize) : ArraySize;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    Size = Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  } else
    Size = CGM.getSize(SizeInChars);
  return Size;
}

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      auto *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis())
      CapturedVars.push_back(CXXThisValue);
    else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV =
          EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal();

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        auto &Ctx = getContext();
        auto DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName()) + ".casted");
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        auto *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), SourceLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
    }
  }
}

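/// Given the lvalue of a value that was passed to the outlined function as a
/// uintptr, reinterpret its address as a pointer to DstType. If the captured
/// variable is a reference, the pointer is additionally stored into a fresh
/// temporary of reference type and the address of that temporary is returned.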
static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
                                    StringRef Name, LValue AddrLV,
                                    bool isReferenceType = false) {
  ASTContext &Ctx = CGF.getContext();

  auto *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), SourceLocation());
  auto TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress();

  // If we are dealing with references we need to return the address of the
  // reference instead of the reference of the value.
  if (isReferenceType) {
    QualType RefType = Ctx.getLValueReferenceType(DstType);
    auto *RefVal = TmpAddr.getPointer();
    TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
    auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
    CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
  }

  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType()) {
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  }
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly,
                           StringRef FunctionName)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  for (auto *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
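    // For example, an 'int' variable captured by copy is received by the
    // outlined function as a uintptr-sized argument and reinterpreted back to
    // 'int' in the body; VLA size expressions are passed the same way.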
    if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
        I->capturesVariableArrayType()) {
      if (FO.UIntPtrCastRequired)
        ArgType = Ctx.getUIntPtrType();
    }

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis())
      II = &Ctx.Idents.get("this");
    else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType.getNonReferenceType());
    auto *Arg =
        ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(), II,
                                  ArgType, ImplicitParamDecl::Other);
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit the function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.S->getLocStart(), CD->getBody()->getLocStart());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (auto *FD : RD->fields()) {
    // Do not map arguments if we emit the function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      // If the variable is a reference we need to materialize it here.
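      // (This happens for a variable of reference-to-pointer type captured by
      // copy: the incoming argument holds the pointer value, so the address of
      // its local copy is stored into a temporary for the reference to bind
      // to.)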
      if (CurVD->getType()->isReferenceType()) {
        Address RefAddr = CGF.CreateMemTemp(
            CurVD->getType(), CGM.getPointerAlign(), ".materialized_ref");
        CGF.EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr,
                              /*Volatile=*/false, CurVD->getType());
        LocalAddr = RefAddr;
      }
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValueBaseInfo BaseInfo(AlignmentSource::Decl, false);
    LValue ArgLVal =
        CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(), BaseInfo);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(castValueFromUintptr(CGF, FD->getType(),
                                                          Args[Cnt]->getName(),
                                                          ArgLVal),
                                     FD->getType(), BaseInfo);
      }
      auto *ExprArg =
          CGF.EmitLoadOfLValue(ArgLVal, SourceLocation()).getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizes.insert({Args[Cnt], {VAT->getSizeExpr(), ExprArg}});
    } else if (I->capturesVariable()) {
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress();
      if (!VarTy->isReferenceType()) {
        if (ArgLVal.getType()->isLValueReferenceType()) {
          ArgAddr = CGF.EmitLoadOfReference(
              ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
        } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
          assert(ArgLVal.getType()->isPointerType());
          ArgAddr = CGF.EmitLoadOfPointer(
              ArgAddr, ArgLVal.getType()->castAs<PointerType>());
        }
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt],
             {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      LocalAddrs.insert(
          {Args[Cnt],
           {Var,
            FO.UIntPtrCastRequired
                ? castValueFromUintptr(CGF, FD->getType(), Args[Cnt]->getName(),
                                       ArgLVal, VarTy->isReferenceType())
                : ArgLVal.getAddress()}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation())
                         .getScalarVal();
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}

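// When debug info is enabled, the captured statement body is emitted into a
// "<helper name>_debug__" function that keeps the original parameter types,
// and a thin wrapper with the uintptr-based signature expected by the OpenMP
// runtime is emitted that simply forwards its arguments to it.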
llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo;
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str());
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      setAddrOfLocalVar(LocalAddrPair.second.first,
                        LocalAddrPair.second.second);
    }
  }
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName());
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  LValueBaseInfo BaseInfo(AlignmentSource::Decl, false);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV =
          WrapperCGF.MakeAddrLValue(I->second.second, Arg->getType(), BaseInfo);
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, SourceLocation());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end())
        CallArg = EI->second.second;
      else {
        LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                              Arg->getType(), BaseInfo);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, SourceLocation());
      }
    }
    CallArgs.emplace_back(CallArg);
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getLocStart(),
                                                  F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  auto SrcBegin = SrcAddr.getPointer();
  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
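  // Conceptually:
  //   if (dest != dest_end)
  //     do { copy(*dest, *src); ++dest; ++src; } while (dest != dest_end);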
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI =
      Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() -> Address {
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> Address { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

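/// Emits private copies for the variables of the 'firstprivate' clauses of
/// directive D, initializing each copy from the original variable, and
/// registers them in PrivateScope. Returns true if any emitted firstprivate
/// variable also appears in a 'lastprivate' clause, in which case the caller
/// still needs to emit the lastprivate copy-back.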
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool FirstprivateIsLastprivate = false;
  llvm::DenseSet<const VarDecl *> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.insert(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  CGCapturedStmtInfo CapturesInfo(cast<CapturedStmt>(*D.getAssociatedStmt()));
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      auto *CapFD = CapturesInfo.lookup(OrigVD);
      auto *FD = CapturedStmtInfo->lookup(OrigVD);
      if (!ThisFirstprivateIsLastprivate && FD && (FD == CapFD) &&
          !FD->getType()->isReferenceType()) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        Address OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](Address DestElement,
                                       Address SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    setAddrOfLocalVar(VDInit, SrcElement);
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
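            // The private copy's initializer refers to VDInit, so mapping
            // VDInit to the original variable's address for the duration of
            // EmitDecl initializes the copy from the original value.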
            setAddrOfLocalVar(VDInit, OriginalAddr);
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> Address {
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
                          VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread.
          // If it is, there is no need to copy data.
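          // On the master thread the threadprivate copy is the master copy
          // itself, so the two addresses compare equal and the copy code is
          // skipped.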
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (auto *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()))
      break;
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done by the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            return GetAddrOfLocalVar(VD);
          });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

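/// Emits the copy-back of lastprivate variables into the original variables.
/// When IsLastIterCond is non-null, the copies are emitted under a check of
/// that condition; unless NoFinals is set, a lastprivate loop counter is first
/// updated to its final value before being copied back.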
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (auto F : LoopDirective->finals()) {
      auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable of a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (auto *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr =
              Address(Builder.CreateLoad(PrivateAddr),
                      getNaturalTypeAlignment(RefTy->getPointeeType()));
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (auto *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

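/// Creates and initializes the thread-private copies for the variables of the
/// 'reduction' clauses of directive D and registers them, together with the
/// LHS/RHS pseudo-variables used by the reduction combiner expressions, in
/// PrivateScope.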
void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (const auto *Ref : C->varlists()) {
      Shareds.emplace_back(Ref);
      Privates.emplace_back(*IPriv);
      ReductionOps.emplace_back(*IRed);
      LHSs.emplace_back(*ILHS);
      RHSs.emplace_back(*IRHS);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ILHS, 1);
      std::advance(IRHS, 1);
    }
  }
  ReductionCodeGen RedCG(Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto ILHS = LHSs.begin();
  auto IRHS = RHSs.begin();
  auto IPriv = Privates.begin();
  for (const auto *IRef : Shareds) {
    auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    auto Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered = PrivateScope.addPrivate(
        RedCG.getBaseDecl(Count), [BaseAddr]() -> Address { return BaseAddr; });
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    if (isa<OMPArraySectionExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() -> Address {
        return RedCG.getSharedLValue(Count).getAddress();
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
        return GetAddrOfLocalVar(PrivateVD);
      });
    } else if (isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
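      // The LHS pseudo-variable of the combiner is bound to the original
      // (shared) storage and the RHS pseudo-variable to the private copy;
      // the private array is cast to the element type the combiner expects.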
      PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() -> Address {
        return RedCG.getSharedLValue(Count).getAddress();
      });
      PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() -> Address {
        return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
                                            ConvertTypeForMem(RHSVD->getType()),
                                            "rhs.begin");
      });
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(
          LHSVD, [OriginalAddr]() -> Address { return OriginalAddr; });
      PrivateScope.addPrivate(
          RHSVD, [this, PrivateVD, RHSVD, IsArray]() -> Address {
            return IsArray
                       ? Builder.CreateElementBitCast(
                             GetAddrOfLocalVar(PrivateVD),
                             ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                       : GetAddrOfLocalVar(PrivateVD);
          });
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      D.getDirectiveKind() == OMPD_simd;
    bool SimpleReduction = D.getDirectiveKind() == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

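/// Emits any post-update expressions attached to the 'reduction' clauses of
/// directive D, wrapping them in a conditional block when CondGen returns a
/// non-null condition.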
static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (auto *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (auto *Cond = CondGen(CGF)) {
          // If the first post-update expression is found, emit the conditional
          // block if it was requested.
          auto *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
/// 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute'
  // chunk's lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedVars, IfCond);
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

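// A '#pragma omp parallel' region is emitted as an outlined function that
// receives the captured variables; conceptually, the construct becomes a
// runtime call such as
//   __kmpc_fork_call(&loc, <n>, <outlined fn>, <captured vars>...);
// and with an 'if' clause the runtime may run the outlined function serially
// instead.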
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit implicit barrier to synchronize threads and avoid data races
      // when propagating the master thread's values of threadprivate
      // variables to the local instances in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
                                 emitEmptyBoundParameters);
  emitPostUpdateForReductionClause(
      *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update the counter values for the current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto *U : C->updates())
      EmitIgnoredExpr(U);
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

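/// Emits the initializers for the variables of the 'linear' clauses of the
/// loop directive D, as well as the pre-computed linear steps for steps that
/// are not compile-time constants. Returns true if any linear clauses were
/// emitted.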
bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // Emit inits for the linear variables.
  bool HasLinears = false;
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto *Init : C->inits()) {
      HasLinears = true;
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (auto *Ref = dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
        auto *OrigVD = cast<VarDecl>(Ref->getDecl());
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        VD->getInit()->getType(), VK_LValue,
                        VD->getInit()->getExprLoc());
        EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
                                                VD->getType()),
                       /*capturedByInit=*/false);
        EmitAutoVarCleanups(Emission);
      } else
        EmitVarDecl(*VD);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
  return HasLinears;
}

void CodeGenFunction::EmitOMPLinearClauseFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (auto *F : C->finals()) {
      if (!DoneBB) {
        if (auto *Cond = CondGen(*this)) {
          // If the first post-update expression is found, emit the conditional
          // block if it was requested.
          auto *ThenBB = createBasicBlock(".omp.linear.pu");
          DoneBB = createBasicBlock(".omp.linear.pu.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress();
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, [OrigAddr]() -> Address { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
    if (auto *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

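/// For each pointer listed in an 'aligned' clause of directive D, emits an
/// alignment assumption. If the clause specifies no alignment, the target's
/// default SIMD alignment for the pointee type is used.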
static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (auto E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}

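/// Registers private copies of the loop counter variables of the loop-based
/// directive S in LoopScope, so that the loop body operates on the privatized
/// counters.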
void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (auto *E : S.counters()) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    (void)LoopScope.addPrivate(VD, [&]() -> Address {
      // Emit var without initialization.
      if (!LocalDeclMap.count(PrivateVD)) {
        auto VarEmission = EmitAutoVarAlloca(*PrivateVD);
        EmitAutoVarCleanups(VarEmission);
      }
      DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      (*I)->getType(), VK_LValue, (*I)->getExprLoc());
      return EmitLValue(&DRE).getAddress();
    });
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
        DeclRefExpr DRE(const_cast<VarDecl *>(VD),
                        LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                        E->getType(), VK_LValue, E->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
    }
    ++I;
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (auto I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (auto *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (auto *E : C->varlists()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
          // Emit private VarDecl with copy init.
          EmitVarDecl(*PrivateVD);
          return GetAddrOfLocalVar(PrivateVD);
        });
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else
        EmitVarDecl(*PrivateVD);
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/simdlen.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable(true);
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
}

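/// Emits the final values of the loop counters after a simd loop so that the
/// original counter variables hold the values they would have after
/// sequential execution. The updates are wrapped in a conditional block when
/// CondGen yields a condition.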
1495 auto *ThenBB = createBasicBlock(".omp.final.then"); 1496 DoneBB = createBasicBlock(".omp.final.done"); 1497 Builder.CreateCondBr(Cond, ThenBB, DoneBB); 1498 EmitBlock(ThenBB); 1499 } 1500 } 1501 Address OrigAddr = Address::invalid(); 1502 if (CED) 1503 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(); 1504 else { 1505 DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD), 1506 /*RefersToEnclosingVariableOrCapture=*/false, 1507 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc()); 1508 OrigAddr = EmitLValue(&DRE).getAddress(); 1509 } 1510 OMPPrivateScope VarScope(*this); 1511 VarScope.addPrivate(OrigVD, 1512 [OrigAddr]() -> Address { return OrigAddr; }); 1513 (void)VarScope.Privatize(); 1514 EmitIgnoredExpr(F); 1515 } 1516 ++IC; 1517 ++IPC; 1518 } 1519 if (DoneBB) 1520 EmitBlock(DoneBB, /*IsFinished=*/true); 1521 } 1522 1523 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF, 1524 const OMPLoopDirective &S, 1525 CodeGenFunction::JumpDest LoopExit) { 1526 CGF.EmitOMPLoopBody(S, LoopExit); 1527 CGF.EmitStopPoint(&S); 1528 } 1529 1530 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 1531 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 1532 OMPLoopScope PreInitScope(CGF, S); 1533 // if (PreCond) { 1534 // for (IV in 0..LastIteration) BODY; 1535 // <Final counter/linear vars updates>; 1536 // } 1537 // 1538 1539 // Emit: if (PreCond) - begin. 1540 // If the condition constant folds and can be elided, avoid emitting the 1541 // whole loop. 1542 bool CondConstant; 1543 llvm::BasicBlock *ContBlock = nullptr; 1544 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 1545 if (!CondConstant) 1546 return; 1547 } else { 1548 auto *ThenBlock = CGF.createBasicBlock("simd.if.then"); 1549 ContBlock = CGF.createBasicBlock("simd.if.end"); 1550 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 1551 CGF.getProfileCount(&S)); 1552 CGF.EmitBlock(ThenBlock); 1553 CGF.incrementProfileCounter(&S); 1554 } 1555 1556 // Emit the loop iteration variable. 1557 const Expr *IVExpr = S.getIterationVariable(); 1558 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 1559 CGF.EmitVarDecl(*IVDecl); 1560 CGF.EmitIgnoredExpr(S.getInit()); 1561 1562 // Emit the iterations count variable. 1563 // If it is not a variable, Sema decided to calculate iterations count on 1564 // each iteration (e.g., it is foldable into a constant). 1565 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 1566 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 1567 // Emit calculation of the iterations count. 1568 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 1569 } 1570 1571 CGF.EmitOMPSimdInit(S); 1572 1573 emitAlignedClause(CGF, S); 1574 (void)CGF.EmitOMPLinearClauseInit(S); 1575 { 1576 OMPPrivateScope LoopScope(CGF); 1577 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 1578 CGF.EmitOMPLinearClause(S, LoopScope); 1579 CGF.EmitOMPPrivateClause(S, LoopScope); 1580 CGF.EmitOMPReductionClauseInit(S, LoopScope); 1581 bool HasLastprivateClause = 1582 CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 1583 (void)LoopScope.Privatize(); 1584 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), 1585 S.getInc(), 1586 [&S](CodeGenFunction &CGF) { 1587 CGF.EmitOMPLoopBody(S, JumpDest()); 1588 CGF.EmitStopPoint(&S); 1589 }, 1590 [](CodeGenFunction &) {}); 1591 CGF.EmitOMPSimdFinal( 1592 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); 1593 // Emit final copy of the lastprivate variables at the end of loops. 
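      // For example (illustrative):
      //   #pragma omp simd lastprivate(x)
      //   for (int i = 0; i < n; ++i) x = a[i];
      // copies the value that the private 'x' holds after the logically last
      // iteration back into the original 'x' once the loop has finished.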
1594 if (HasLastprivateClause) 1595 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true); 1596 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd); 1597 emitPostUpdateForReductionClause( 1598 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); 1599 } 1600 CGF.EmitOMPLinearClauseFinal( 1601 S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); 1602 // Emit: if (PreCond) - end. 1603 if (ContBlock) { 1604 CGF.EmitBranch(ContBlock); 1605 CGF.EmitBlock(ContBlock, true); 1606 } 1607 }; 1608 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 1609 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 1610 } 1611 1612 void CodeGenFunction::EmitOMPOuterLoop( 1613 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S, 1614 CodeGenFunction::OMPPrivateScope &LoopScope, 1615 const CodeGenFunction::OMPLoopArguments &LoopArgs, 1616 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop, 1617 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) { 1618 auto &RT = CGM.getOpenMPRuntime(); 1619 1620 const Expr *IVExpr = S.getIterationVariable(); 1621 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 1622 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 1623 1624 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 1625 1626 // Start the loop with a block that tests the condition. 1627 auto CondBlock = createBasicBlock("omp.dispatch.cond"); 1628 EmitBlock(CondBlock); 1629 const SourceRange &R = S.getSourceRange(); 1630 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()), 1631 SourceLocToDebugLoc(R.getEnd())); 1632 1633 llvm::Value *BoolCondVal = nullptr; 1634 if (!DynamicOrOrdered) { 1635 // UB = min(UB, GlobalUB) or 1636 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g. 1637 // 'distribute parallel for') 1638 EmitIgnoredExpr(LoopArgs.EUB); 1639 // IV = LB 1640 EmitIgnoredExpr(LoopArgs.Init); 1641 // IV < UB 1642 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond); 1643 } else { 1644 BoolCondVal = 1645 RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, LoopArgs.IL, 1646 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST); 1647 } 1648 1649 // If there are any cleanups between here and the loop-exit scope, 1650 // create a block to stage a loop exit along. 1651 auto ExitBlock = LoopExit.getBlock(); 1652 if (LoopScope.requiresCleanups()) 1653 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 1654 1655 auto LoopBody = createBasicBlock("omp.dispatch.body"); 1656 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 1657 if (ExitBlock != LoopExit.getBlock()) { 1658 EmitBlock(ExitBlock); 1659 EmitBranchThroughCleanup(LoopExit); 1660 } 1661 EmitBlock(LoopBody); 1662 1663 // Emit "IV = LB" (in case of static schedule, we have already calculated new 1664 // LB for loop condition and emitted it above). 1665 if (DynamicOrOrdered) 1666 EmitIgnoredExpr(LoopArgs.Init); 1667 1668 // Create a block for the increment. 1669 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 1670 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 1671 1672 // Generate !llvm.loop.parallel metadata for loads and stores for loops 1673 // with dynamic/guided scheduling and without ordered clause. 
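  // Roughly, the two paths below do the following: for plain worksharing
  // loops only the parallel-accesses annotation of the dispatched loop is
  // toggled (when the schedule is non-monotonic), while for simd-style
  // directives EmitOMPSimdInit additionally enables vectorization and applies
  // the simdlen/safelen-derived vector width.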
1674 if (!isOpenMPSimdDirective(S.getDirectiveKind())) 1675 LoopStack.setParallel(!IsMonotonic); 1676 else 1677 EmitOMPSimdInit(S, IsMonotonic); 1678 1679 SourceLocation Loc = S.getLocStart(); 1680 1681 // when 'distribute' is not combined with a 'for': 1682 // while (idx <= UB) { BODY; ++idx; } 1683 // when 'distribute' is combined with a 'for' 1684 // (e.g. 'distribute parallel for') 1685 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 1686 EmitOMPInnerLoop( 1687 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr, 1688 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 1689 CodeGenLoop(CGF, S, LoopExit); 1690 }, 1691 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) { 1692 CodeGenOrdered(CGF, Loc, IVSize, IVSigned); 1693 }); 1694 1695 EmitBlock(Continue.getBlock()); 1696 BreakContinueStack.pop_back(); 1697 if (!DynamicOrOrdered) { 1698 // Emit "LB = LB + Stride", "UB = UB + Stride". 1699 EmitIgnoredExpr(LoopArgs.NextLB); 1700 EmitIgnoredExpr(LoopArgs.NextUB); 1701 } 1702 1703 EmitBranch(CondBlock); 1704 LoopStack.pop(); 1705 // Emit the fall-through block. 1706 EmitBlock(LoopExit.getBlock()); 1707 1708 // Tell the runtime we are done. 1709 auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) { 1710 if (!DynamicOrOrdered) 1711 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd(), 1712 S.getDirectiveKind()); 1713 }; 1714 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 1715 } 1716 1717 void CodeGenFunction::EmitOMPForOuterLoop( 1718 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic, 1719 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered, 1720 const OMPLoopArguments &LoopArgs, 1721 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 1722 auto &RT = CGM.getOpenMPRuntime(); 1723 1724 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 1725 const bool DynamicOrOrdered = 1726 Ordered || RT.isDynamic(ScheduleKind.Schedule); 1727 1728 assert((Ordered || 1729 !RT.isStaticNonchunked(ScheduleKind.Schedule, 1730 LoopArgs.Chunk != nullptr)) && 1731 "static non-chunked schedule does not need outer loop"); 1732 1733 // Emit outer loop. 1734 // 1735 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 1736 // When schedule(dynamic,chunk_size) is specified, the iterations are 1737 // distributed to threads in the team in chunks as the threads request them. 1738 // Each thread executes a chunk of iterations, then requests another chunk, 1739 // until no chunks remain to be distributed. Each chunk contains chunk_size 1740 // iterations, except for the last chunk to be distributed, which may have 1741 // fewer iterations. When no chunk_size is specified, it defaults to 1. 1742 // 1743 // When schedule(guided,chunk_size) is specified, the iterations are assigned 1744 // to threads in the team in chunks as the executing threads request them. 1745 // Each thread executes a chunk of iterations, then requests another chunk, 1746 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 1747 // each chunk is proportional to the number of unassigned iterations divided 1748 // by the number of threads in the team, decreasing to 1. For a chunk_size 1749 // with value k (greater than 1), the size of each chunk is determined in the 1750 // same way, with the restriction that the chunks do not contain fewer than k 1751 // iterations (except for the last chunk to be assigned, which may have fewer 1752 // than k iterations). 
1753 // 1754 // When schedule(auto) is specified, the decision regarding scheduling is 1755 // delegated to the compiler and/or runtime system. The programmer gives the 1756 // implementation the freedom to choose any possible mapping of iterations to 1757 // threads in the team. 1758 // 1759 // When schedule(runtime) is specified, the decision regarding scheduling is 1760 // deferred until run time, and the schedule and chunk size are taken from the 1761 // run-sched-var ICV. If the ICV is set to auto, the schedule is 1762 // implementation defined 1763 // 1764 // while(__kmpc_dispatch_next(&LB, &UB)) { 1765 // idx = LB; 1766 // while (idx <= UB) { BODY; ++idx; 1767 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 1768 // } // inner loop 1769 // } 1770 // 1771 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 1772 // When schedule(static, chunk_size) is specified, iterations are divided into 1773 // chunks of size chunk_size, and the chunks are assigned to the threads in 1774 // the team in a round-robin fashion in the order of the thread number. 1775 // 1776 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 1777 // while (idx <= UB) { BODY; ++idx; } // inner loop 1778 // LB = LB + ST; 1779 // UB = UB + ST; 1780 // } 1781 // 1782 1783 const Expr *IVExpr = S.getIterationVariable(); 1784 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 1785 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 1786 1787 if (DynamicOrOrdered) { 1788 auto DispatchBounds = CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB); 1789 llvm::Value *LBVal = DispatchBounds.first; 1790 llvm::Value *UBVal = DispatchBounds.second; 1791 CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal, 1792 LoopArgs.Chunk}; 1793 RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize, 1794 IVSigned, Ordered, DipatchRTInputValues); 1795 } else { 1796 CGOpenMPRuntime::StaticRTInput StaticInit( 1797 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB, 1798 LoopArgs.ST, LoopArgs.Chunk); 1799 RT.emitForStaticInit(*this, S.getLocStart(), S.getDirectiveKind(), 1800 ScheduleKind, StaticInit); 1801 } 1802 1803 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc, 1804 const unsigned IVSize, 1805 const bool IVSigned) { 1806 if (Ordered) { 1807 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize, 1808 IVSigned); 1809 } 1810 }; 1811 1812 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST, 1813 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB); 1814 OuterLoopArgs.IncExpr = S.getInc(); 1815 OuterLoopArgs.Init = S.getInit(); 1816 OuterLoopArgs.Cond = S.getCond(); 1817 OuterLoopArgs.NextLB = S.getNextLowerBound(); 1818 OuterLoopArgs.NextUB = S.getNextUpperBound(); 1819 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs, 1820 emitOMPLoopBodyWithStopPoint, CodeGenOrdered); 1821 } 1822 1823 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc, 1824 const unsigned IVSize, const bool IVSigned) {} 1825 1826 void CodeGenFunction::EmitOMPDistributeOuterLoop( 1827 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S, 1828 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs, 1829 const CodeGenLoopTy &CodeGenLoopContent) { 1830 1831 auto &RT = CGM.getOpenMPRuntime(); 1832 1833 // Emit outer loop. 
1834   // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
1835   // dynamic.
1836   //
1837
1838   const Expr *IVExpr = S.getIterationVariable();
1839   const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
1840   const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
1841
1842   CGOpenMPRuntime::StaticRTInput StaticInit(
1843       IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
1844       LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
1845   RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, StaticInit);
1846
1847   // For combined 'distribute' and 'for' directives, the increment expression
1848   // of 'distribute' is stored in DistInc. For 'distribute' alone, it is in Inc.
1849   Expr *IncExpr;
1850   if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
1851     IncExpr = S.getDistInc();
1852   else
1853     IncExpr = S.getInc();
1854
1855   // This routine is shared by 'omp distribute parallel for' and
1856   // 'omp distribute': select the right EUB expression depending on the
1857   // directive.
1858   OMPLoopArguments OuterLoopArgs;
1859   OuterLoopArgs.LB = LoopArgs.LB;
1860   OuterLoopArgs.UB = LoopArgs.UB;
1861   OuterLoopArgs.ST = LoopArgs.ST;
1862   OuterLoopArgs.IL = LoopArgs.IL;
1863   OuterLoopArgs.Chunk = LoopArgs.Chunk;
1864   OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
1865                           ? S.getCombinedEnsureUpperBound()
1866                           : S.getEnsureUpperBound();
1867   OuterLoopArgs.IncExpr = IncExpr;
1868   OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
1869                            ? S.getCombinedInit()
1870                            : S.getInit();
1871   OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
1872                            ? S.getCombinedCond()
1873                            : S.getCond();
1874   OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
1875                              ? S.getCombinedNextLowerBound()
1876                              : S.getNextLowerBound();
1877   OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
1878                              ? S.getCombinedNextUpperBound()
1879                              : S.getNextUpperBound();
1880
1881   EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
1882                    LoopScope, OuterLoopArgs, CodeGenLoopContent,
1883                    emitEmptyOrdered);
1884 }
1885
1886 /// Emit a helper variable and return the corresponding lvalue.
1887 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
1888                                const DeclRefExpr *Helper) {
1889   auto VDecl = cast<VarDecl>(Helper->getDecl());
1890   CGF.EmitVarDecl(*VDecl);
1891   return CGF.EmitLValue(Helper);
1892 }
1893
1894 static std::pair<LValue, LValue>
1895 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
1896                                      const OMPExecutableDirective &S) {
1897   const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
1898   LValue LB =
1899       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
1900   LValue UB =
1901       EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
1902
1903   // When composing 'distribute' with 'for' (e.g. as in 'distribute
1904   // parallel for') we need to use the 'distribute'
1905   // chunk lower and upper bounds rather than the whole loop iteration
1906   // space. These are parameters to the outlined function for 'parallel'
1907   // and we copy the bounds of the previous schedule into the
1908   // current ones.
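  // A rough sketch of what is emitted below (variable names illustrative):
  //   omp.lb = (IV-type) prev.lb;  // lower bound of the team's distribute chunk
  //   omp.ub = (IV-type) prev.ub;  // upper bound of the team's distribute chunk
  // so that the inner 'for' worksharing loop iterates only over the chunk that
  // the enclosing 'distribute' assigned to this team.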
1909 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable()); 1910 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable()); 1911 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(PrevLB, SourceLocation()); 1912 PrevLBVal = CGF.EmitScalarConversion( 1913 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(), 1914 LS.getIterationVariable()->getType(), SourceLocation()); 1915 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(PrevUB, SourceLocation()); 1916 PrevUBVal = CGF.EmitScalarConversion( 1917 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(), 1918 LS.getIterationVariable()->getType(), SourceLocation()); 1919 1920 CGF.EmitStoreOfScalar(PrevLBVal, LB); 1921 CGF.EmitStoreOfScalar(PrevUBVal, UB); 1922 1923 return {LB, UB}; 1924 } 1925 1926 /// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then 1927 /// we need to use the LB and UB expressions generated by the worksharing 1928 /// code generation support, whereas in non combined situations we would 1929 /// just emit 0 and the LastIteration expression 1930 /// This function is necessary due to the difference of the LB and UB 1931 /// types for the RT emission routines for 'for_static_init' and 1932 /// 'for_dispatch_init' 1933 static std::pair<llvm::Value *, llvm::Value *> 1934 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF, 1935 const OMPExecutableDirective &S, 1936 Address LB, Address UB) { 1937 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 1938 const Expr *IVExpr = LS.getIterationVariable(); 1939 // when implementing a dynamic schedule for a 'for' combined with a 1940 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop 1941 // is not normalized as each team only executes its own assigned 1942 // distribute chunk 1943 QualType IteratorTy = IVExpr->getType(); 1944 llvm::Value *LBVal = CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, 1945 SourceLocation()); 1946 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, 1947 SourceLocation()); 1948 return {LBVal, UBVal}; 1949 } 1950 1951 static void emitDistributeParallelForDistributeInnerBoundParams( 1952 CodeGenFunction &CGF, const OMPExecutableDirective &S, 1953 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) { 1954 const auto &Dir = cast<OMPLoopDirective>(S); 1955 LValue LB = 1956 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable())); 1957 auto LBCast = CGF.Builder.CreateIntCast( 1958 CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false); 1959 CapturedVars.push_back(LBCast); 1960 LValue UB = 1961 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable())); 1962 1963 auto UBCast = CGF.Builder.CreateIntCast( 1964 CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false); 1965 CapturedVars.push_back(UBCast); 1966 } 1967 1968 static void 1969 emitInnerParallelForWhenCombined(CodeGenFunction &CGF, 1970 const OMPLoopDirective &S, 1971 CodeGenFunction::JumpDest LoopExit) { 1972 auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF, 1973 PrePostActionTy &) { 1974 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(), 1975 emitDistributeParallelForInnerBounds, 1976 emitDistributeParallelForDispatchBounds); 1977 }; 1978 1979 emitCommonOMPParallelDirective( 1980 CGF, S, OMPD_for, CGInlinedWorksharingLoop, 1981 emitDistributeParallelForDistributeInnerBoundParams); 1982 } 1983 1984 void CodeGenFunction::EmitOMPDistributeParallelForDirective( 1985 const OMPDistributeParallelForDirective &S) { 1986 auto &&CodeGen = [&S](CodeGenFunction 
&CGF, PrePostActionTy &) { 1987 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined, 1988 S.getDistInc()); 1989 }; 1990 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 1991 OMPCancelStackRAII CancelRegion(*this, OMPD_distribute_parallel_for, 1992 /*HasCancel=*/false); 1993 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen, 1994 /*HasCancel=*/false); 1995 } 1996 1997 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective( 1998 const OMPDistributeParallelForSimdDirective &S) { 1999 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2000 CGM.getOpenMPRuntime().emitInlinedDirective( 2001 *this, OMPD_distribute_parallel_for_simd, 2002 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2003 OMPLoopScope PreInitScope(CGF, S); 2004 CGF.EmitStmt( 2005 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2006 }); 2007 } 2008 2009 void CodeGenFunction::EmitOMPDistributeSimdDirective( 2010 const OMPDistributeSimdDirective &S) { 2011 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2012 CGM.getOpenMPRuntime().emitInlinedDirective( 2013 *this, OMPD_distribute_simd, 2014 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2015 OMPLoopScope PreInitScope(CGF, S); 2016 CGF.EmitStmt( 2017 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2018 }); 2019 } 2020 2021 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective( 2022 const OMPTargetParallelForSimdDirective &S) { 2023 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2024 CGM.getOpenMPRuntime().emitInlinedDirective( 2025 *this, OMPD_target_parallel_for_simd, 2026 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2027 OMPLoopScope PreInitScope(CGF, S); 2028 CGF.EmitStmt( 2029 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2030 }); 2031 } 2032 2033 void CodeGenFunction::EmitOMPTargetSimdDirective( 2034 const OMPTargetSimdDirective &S) { 2035 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2036 CGM.getOpenMPRuntime().emitInlinedDirective( 2037 *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2038 OMPLoopScope PreInitScope(CGF, S); 2039 CGF.EmitStmt( 2040 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2041 }); 2042 } 2043 2044 void CodeGenFunction::EmitOMPTeamsDistributeDirective( 2045 const OMPTeamsDistributeDirective &S) { 2046 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2047 CGM.getOpenMPRuntime().emitInlinedDirective( 2048 *this, OMPD_teams_distribute, 2049 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2050 OMPLoopScope PreInitScope(CGF, S); 2051 CGF.EmitStmt( 2052 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2053 }); 2054 } 2055 2056 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective( 2057 const OMPTeamsDistributeSimdDirective &S) { 2058 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2059 CGM.getOpenMPRuntime().emitInlinedDirective( 2060 *this, OMPD_teams_distribute_simd, 2061 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2062 OMPLoopScope PreInitScope(CGF, S); 2063 CGF.EmitStmt( 2064 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2065 }); 2066 } 2067 2068 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective( 2069 const OMPTeamsDistributeParallelForSimdDirective &S) { 2070 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2071 CGM.getOpenMPRuntime().emitInlinedDirective( 2072 *this, OMPD_teams_distribute_parallel_for_simd, 2073 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2074 OMPLoopScope PreInitScope(CGF, S); 2075 CGF.EmitStmt( 2076 
cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2077 }); 2078 } 2079 2080 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective( 2081 const OMPTeamsDistributeParallelForDirective &S) { 2082 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2083 CGM.getOpenMPRuntime().emitInlinedDirective( 2084 *this, OMPD_teams_distribute_parallel_for, 2085 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2086 OMPLoopScope PreInitScope(CGF, S); 2087 CGF.EmitStmt( 2088 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2089 }); 2090 } 2091 2092 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective( 2093 const OMPTargetTeamsDistributeDirective &S) { 2094 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2095 CGM.getOpenMPRuntime().emitInlinedDirective( 2096 *this, OMPD_target_teams_distribute, 2097 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2098 CGF.EmitStmt( 2099 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2100 }); 2101 } 2102 2103 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective( 2104 const OMPTargetTeamsDistributeParallelForDirective &S) { 2105 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2106 CGM.getOpenMPRuntime().emitInlinedDirective( 2107 *this, OMPD_target_teams_distribute_parallel_for, 2108 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2109 CGF.EmitStmt( 2110 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2111 }); 2112 } 2113 2114 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective( 2115 const OMPTargetTeamsDistributeParallelForSimdDirective &S) { 2116 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2117 CGM.getOpenMPRuntime().emitInlinedDirective( 2118 *this, OMPD_target_teams_distribute_parallel_for_simd, 2119 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2120 CGF.EmitStmt( 2121 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2122 }); 2123 } 2124 2125 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective( 2126 const OMPTargetTeamsDistributeSimdDirective &S) { 2127 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2128 CGM.getOpenMPRuntime().emitInlinedDirective( 2129 *this, OMPD_target_teams_distribute_simd, 2130 [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2131 CGF.EmitStmt( 2132 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2133 }); 2134 } 2135 2136 namespace { 2137 struct ScheduleKindModifiersTy { 2138 OpenMPScheduleClauseKind Kind; 2139 OpenMPScheduleClauseModifier M1; 2140 OpenMPScheduleClauseModifier M2; 2141 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind, 2142 OpenMPScheduleClauseModifier M1, 2143 OpenMPScheduleClauseModifier M2) 2144 : Kind(Kind), M1(M1), M2(M2) {} 2145 }; 2146 } // namespace 2147 2148 bool CodeGenFunction::EmitOMPWorksharingLoop( 2149 const OMPLoopDirective &S, Expr *EUB, 2150 const CodeGenLoopBoundsTy &CodeGenLoopBounds, 2151 const CodeGenDispatchBoundsTy &CGDispatchBounds) { 2152 // Emit the loop iteration variable. 2153 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 2154 auto IVDecl = cast<VarDecl>(IVExpr->getDecl()); 2155 EmitVarDecl(*IVDecl); 2156 2157 // Emit the iterations count variable. 2158 // If it is not a variable, Sema decided to calculate iterations count on each 2159 // iteration (e.g., it is foldable into a constant). 2160 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 2161 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 2162 // Emit calculation of the iterations count. 
2163 EmitIgnoredExpr(S.getCalcLastIteration()); 2164 } 2165 2166 auto &RT = CGM.getOpenMPRuntime(); 2167 2168 bool HasLastprivateClause; 2169 // Check pre-condition. 2170 { 2171 OMPLoopScope PreInitScope(*this, S); 2172 // Skip the entire loop if we don't meet the precondition. 2173 // If the condition constant folds and can be elided, avoid emitting the 2174 // whole loop. 2175 bool CondConstant; 2176 llvm::BasicBlock *ContBlock = nullptr; 2177 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 2178 if (!CondConstant) 2179 return false; 2180 } else { 2181 auto *ThenBlock = createBasicBlock("omp.precond.then"); 2182 ContBlock = createBasicBlock("omp.precond.end"); 2183 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 2184 getProfileCount(&S)); 2185 EmitBlock(ThenBlock); 2186 incrementProfileCounter(&S); 2187 } 2188 2189 bool Ordered = false; 2190 if (auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) { 2191 if (OrderedClause->getNumForLoops()) 2192 RT.emitDoacrossInit(*this, S); 2193 else 2194 Ordered = true; 2195 } 2196 2197 llvm::DenseSet<const Expr *> EmittedFinals; 2198 emitAlignedClause(*this, S); 2199 bool HasLinears = EmitOMPLinearClauseInit(S); 2200 // Emit helper vars inits. 2201 2202 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S); 2203 LValue LB = Bounds.first; 2204 LValue UB = Bounds.second; 2205 LValue ST = 2206 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 2207 LValue IL = 2208 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 2209 2210 // Emit 'then' code. 2211 { 2212 OMPPrivateScope LoopScope(*this); 2213 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) { 2214 // Emit implicit barrier to synchronize threads and avoid data races on 2215 // initialization of firstprivate variables and post-update of 2216 // lastprivate variables. 2217 CGM.getOpenMPRuntime().emitBarrierCall( 2218 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false, 2219 /*ForceSimpleCall=*/true); 2220 } 2221 EmitOMPPrivateClause(S, LoopScope); 2222 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 2223 EmitOMPReductionClauseInit(S, LoopScope); 2224 EmitOMPPrivateLoopCounters(S, LoopScope); 2225 EmitOMPLinearClause(S, LoopScope); 2226 (void)LoopScope.Privatize(); 2227 2228 // Detect the loop schedule kind and chunk. 2229 llvm::Value *Chunk = nullptr; 2230 OpenMPScheduleTy ScheduleKind; 2231 if (auto *C = S.getSingleClause<OMPScheduleClause>()) { 2232 ScheduleKind.Schedule = C->getScheduleKind(); 2233 ScheduleKind.M1 = C->getFirstScheduleModifier(); 2234 ScheduleKind.M2 = C->getSecondScheduleModifier(); 2235 if (const auto *Ch = C->getChunkSize()) { 2236 Chunk = EmitScalarExpr(Ch); 2237 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 2238 S.getIterationVariable()->getType(), 2239 S.getLocStart()); 2240 } 2241 } 2242 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 2243 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 2244 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 2245 // If the static schedule kind is specified or if the ordered clause is 2246 // specified, and if no monotonic modifier is specified, the effect will 2247 // be as if the monotonic modifier was specified. 
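      // For example (illustrative): '#pragma omp for schedule(static)' and
      // '#pragma omp for ordered schedule(dynamic)' are both treated as
      // monotonic below, whereas 'schedule(nonmonotonic: dynamic)' is not.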
2248 if (RT.isStaticNonchunked(ScheduleKind.Schedule, 2249 /* Chunked */ Chunk != nullptr) && 2250 !Ordered) { 2251 if (isOpenMPSimdDirective(S.getDirectiveKind())) 2252 EmitOMPSimdInit(S, /*IsMonotonic=*/true); 2253 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 2254 // When no chunk_size is specified, the iteration space is divided into 2255 // chunks that are approximately equal in size, and at most one chunk is 2256 // distributed to each thread. Note that the size of the chunks is 2257 // unspecified in this case. 2258 CGOpenMPRuntime::StaticRTInput StaticInit( 2259 IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(), 2260 UB.getAddress(), ST.getAddress()); 2261 RT.emitForStaticInit(*this, S.getLocStart(), S.getDirectiveKind(), 2262 ScheduleKind, StaticInit); 2263 auto LoopExit = 2264 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 2265 // UB = min(UB, GlobalUB); 2266 EmitIgnoredExpr(S.getEnsureUpperBound()); 2267 // IV = LB; 2268 EmitIgnoredExpr(S.getInit()); 2269 // while (idx <= UB) { BODY; ++idx; } 2270 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), 2271 S.getInc(), 2272 [&S, LoopExit](CodeGenFunction &CGF) { 2273 CGF.EmitOMPLoopBody(S, LoopExit); 2274 CGF.EmitStopPoint(&S); 2275 }, 2276 [](CodeGenFunction &) {}); 2277 EmitBlock(LoopExit.getBlock()); 2278 // Tell the runtime we are done. 2279 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2280 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd(), 2281 S.getDirectiveKind()); 2282 }; 2283 OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen); 2284 } else { 2285 const bool IsMonotonic = 2286 Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static || 2287 ScheduleKind.Schedule == OMPC_SCHEDULE_unknown || 2288 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 2289 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 2290 // Emit the outer loop, which requests its work chunk [LB..UB] from 2291 // runtime and runs the inner loop to process it. 2292 const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(), 2293 ST.getAddress(), IL.getAddress(), 2294 Chunk, EUB); 2295 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 2296 LoopArguments, CGDispatchBounds); 2297 } 2298 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 2299 EmitOMPSimdFinal(S, 2300 [&](CodeGenFunction &CGF) -> llvm::Value * { 2301 return CGF.Builder.CreateIsNotNull( 2302 CGF.EmitLoadOfScalar(IL, S.getLocStart())); 2303 }); 2304 } 2305 EmitOMPReductionClauseFinal( 2306 S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind()) 2307 ? /*Parallel and Simd*/ OMPD_parallel_for_simd 2308 : /*Parallel only*/ OMPD_parallel); 2309 // Emit post-update of the reduction variables if IsLastIter != 0. 2310 emitPostUpdateForReductionClause( 2311 *this, S, [&](CodeGenFunction &CGF) -> llvm::Value * { 2312 return CGF.Builder.CreateIsNotNull( 2313 CGF.EmitLoadOfScalar(IL, S.getLocStart())); 2314 }); 2315 // Emit final copy of the lastprivate variables if IsLastIter != 0. 2316 if (HasLastprivateClause) 2317 EmitOMPLastprivateClauseFinal( 2318 S, isOpenMPSimdDirective(S.getDirectiveKind()), 2319 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart()))); 2320 } 2321 EmitOMPLinearClauseFinal(S, [&](CodeGenFunction &CGF) -> llvm::Value * { 2322 return CGF.Builder.CreateIsNotNull( 2323 CGF.EmitLoadOfScalar(IL, S.getLocStart())); 2324 }); 2325 // We're now done with the loop, so jump to the continuation block. 
2326 if (ContBlock) { 2327 EmitBranch(ContBlock); 2328 EmitBlock(ContBlock, true); 2329 } 2330 } 2331 return HasLastprivateClause; 2332 } 2333 2334 /// The following two functions generate expressions for the loop lower 2335 /// and upper bounds in case of static and dynamic (dispatch) schedule 2336 /// of the associated 'for' or 'distribute' loop. 2337 static std::pair<LValue, LValue> 2338 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) { 2339 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2340 LValue LB = 2341 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable())); 2342 LValue UB = 2343 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable())); 2344 return {LB, UB}; 2345 } 2346 2347 /// When dealing with dispatch schedules (e.g. dynamic, guided) we do not 2348 /// consider the lower and upper bound expressions generated by the 2349 /// worksharing loop support, but we use 0 and the iteration space size as 2350 /// constants 2351 static std::pair<llvm::Value *, llvm::Value *> 2352 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S, 2353 Address LB, Address UB) { 2354 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S); 2355 const Expr *IVExpr = LS.getIterationVariable(); 2356 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType()); 2357 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0); 2358 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration()); 2359 return {LBVal, UBVal}; 2360 } 2361 2362 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 2363 bool HasLastprivates = false; 2364 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 2365 PrePostActionTy &) { 2366 OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); 2367 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 2368 emitForLoopBounds, 2369 emitDispatchForLoopBounds); 2370 }; 2371 { 2372 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2373 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 2374 S.hasCancel()); 2375 } 2376 2377 // Emit an implicit barrier at the end. 2378 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) { 2379 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); 2380 } 2381 } 2382 2383 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 2384 bool HasLastprivates = false; 2385 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF, 2386 PrePostActionTy &) { 2387 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), 2388 emitForLoopBounds, 2389 emitDispatchForLoopBounds); 2390 }; 2391 { 2392 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2393 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 2394 } 2395 2396 // Emit an implicit barrier at the end. 
2397 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) { 2398 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); 2399 } 2400 } 2401 2402 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 2403 const Twine &Name, 2404 llvm::Value *Init = nullptr) { 2405 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 2406 if (Init) 2407 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true); 2408 return LVal; 2409 } 2410 2411 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 2412 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt(); 2413 auto *CS = dyn_cast<CompoundStmt>(Stmt); 2414 bool HasLastprivates = false; 2415 auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF, 2416 PrePostActionTy &) { 2417 auto &C = CGF.CGM.getContext(); 2418 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 2419 // Emit helper vars inits. 2420 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 2421 CGF.Builder.getInt32(0)); 2422 auto *GlobalUBVal = CS != nullptr ? CGF.Builder.getInt32(CS->size() - 1) 2423 : CGF.Builder.getInt32(0); 2424 LValue UB = 2425 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 2426 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 2427 CGF.Builder.getInt32(1)); 2428 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 2429 CGF.Builder.getInt32(0)); 2430 // Loop counter. 2431 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 2432 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); 2433 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 2434 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); 2435 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 2436 // Generate condition for loop. 2437 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, 2438 OK_Ordinary, S.getLocStart(), FPOptions()); 2439 // Increment for loop counter. 2440 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary, 2441 S.getLocStart()); 2442 auto BodyGen = [Stmt, CS, &S, &IV](CodeGenFunction &CGF) { 2443 // Iterate through all sections and emit a switch construct: 2444 // switch (IV) { 2445 // case 0: 2446 // <SectionStmt[0]>; 2447 // break; 2448 // ... 2449 // case <NumSection> - 1: 2450 // <SectionStmt[<NumSection> - 1]>; 2451 // break; 2452 // } 2453 // .omp.sections.exit: 2454 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 2455 auto *SwitchStmt = CGF.Builder.CreateSwitch( 2456 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB, 2457 CS == nullptr ? 
1 : CS->size()); 2458 if (CS) { 2459 unsigned CaseNumber = 0; 2460 for (auto *SubStmt : CS->children()) { 2461 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 2462 CGF.EmitBlock(CaseBB); 2463 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 2464 CGF.EmitStmt(SubStmt); 2465 CGF.EmitBranch(ExitBB); 2466 ++CaseNumber; 2467 } 2468 } else { 2469 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 2470 CGF.EmitBlock(CaseBB); 2471 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB); 2472 CGF.EmitStmt(Stmt); 2473 CGF.EmitBranch(ExitBB); 2474 } 2475 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 2476 }; 2477 2478 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 2479 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 2480 // Emit implicit barrier to synchronize threads and avoid data races on 2481 // initialization of firstprivate variables and post-update of lastprivate 2482 // variables. 2483 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 2484 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false, 2485 /*ForceSimpleCall=*/true); 2486 } 2487 CGF.EmitOMPPrivateClause(S, LoopScope); 2488 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 2489 CGF.EmitOMPReductionClauseInit(S, LoopScope); 2490 (void)LoopScope.Privatize(); 2491 2492 // Emit static non-chunked loop. 2493 OpenMPScheduleTy ScheduleKind; 2494 ScheduleKind.Schedule = OMPC_SCHEDULE_static; 2495 CGOpenMPRuntime::StaticRTInput StaticInit( 2496 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), 2497 LB.getAddress(), UB.getAddress(), ST.getAddress()); 2498 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 2499 CGF, S.getLocStart(), S.getDirectiveKind(), ScheduleKind, StaticInit); 2500 // UB = min(UB, GlobalUB); 2501 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart()); 2502 auto *MinUBGlobalUB = CGF.Builder.CreateSelect( 2503 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 2504 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 2505 // IV = LB; 2506 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV); 2507 // while (idx <= UB) { BODY; ++idx; } 2508 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen, 2509 [](CodeGenFunction &) {}); 2510 // Tell the runtime we are done. 2511 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2512 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd(), 2513 S.getDirectiveKind()); 2514 }; 2515 CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen); 2516 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 2517 // Emit post-update of the reduction variables if IsLastIter != 0. 2518 emitPostUpdateForReductionClause( 2519 CGF, S, [&](CodeGenFunction &CGF) -> llvm::Value * { 2520 return CGF.Builder.CreateIsNotNull( 2521 CGF.EmitLoadOfScalar(IL, S.getLocStart())); 2522 }); 2523 2524 // Emit final copy of the lastprivate variables if IsLastIter != 0. 
2525 if (HasLastprivates) 2526 CGF.EmitOMPLastprivateClauseFinal( 2527 S, /*NoFinals=*/false, 2528 CGF.Builder.CreateIsNotNull( 2529 CGF.EmitLoadOfScalar(IL, S.getLocStart()))); 2530 }; 2531 2532 bool HasCancel = false; 2533 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 2534 HasCancel = OSD->hasCancel(); 2535 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 2536 HasCancel = OPSD->hasCancel(); 2537 OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel); 2538 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 2539 HasCancel); 2540 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 2541 // clause. Otherwise the barrier will be generated by the codegen for the 2542 // directive. 2543 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 2544 // Emit implicit barrier to synchronize threads and avoid data races on 2545 // initialization of firstprivate variables. 2546 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), 2547 OMPD_unknown); 2548 } 2549 } 2550 2551 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 2552 { 2553 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2554 EmitSections(S); 2555 } 2556 // Emit an implicit barrier at the end. 2557 if (!S.getSingleClause<OMPNowaitClause>()) { 2558 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), 2559 OMPD_sections); 2560 } 2561 } 2562 2563 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 2564 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2565 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2566 }; 2567 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2568 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen, 2569 S.hasCancel()); 2570 } 2571 2572 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 2573 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 2574 llvm::SmallVector<const Expr *, 8> DestExprs; 2575 llvm::SmallVector<const Expr *, 8> SrcExprs; 2576 llvm::SmallVector<const Expr *, 8> AssignmentOps; 2577 // Check if there are any 'copyprivate' clauses associated with this 2578 // 'single' construct. 
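  // For example (illustrative):
  //   #pragma omp single copyprivate(x)
  //   x = read_input();
  // broadcasts the value of 'x' produced by the one thread that executed the
  // 'single' region to the corresponding variables of all other threads in
  // the team.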
2579 // Build a list of copyprivate variables along with helper expressions 2580 // (<source>, <destination>, <destination>=<source> expressions) 2581 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 2582 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 2583 DestExprs.append(C->destination_exprs().begin(), 2584 C->destination_exprs().end()); 2585 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 2586 AssignmentOps.append(C->assignment_ops().begin(), 2587 C->assignment_ops().end()); 2588 } 2589 // Emit code for 'single' region along with 'copyprivate' clauses 2590 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2591 Action.Enter(CGF); 2592 OMPPrivateScope SingleScope(CGF); 2593 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope); 2594 CGF.EmitOMPPrivateClause(S, SingleScope); 2595 (void)SingleScope.Privatize(); 2596 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2597 }; 2598 { 2599 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2600 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), 2601 CopyprivateVars, DestExprs, 2602 SrcExprs, AssignmentOps); 2603 } 2604 // Emit an implicit barrier at the end (to avoid data race on firstprivate 2605 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 2606 if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) { 2607 CGM.getOpenMPRuntime().emitBarrierCall( 2608 *this, S.getLocStart(), 2609 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 2610 } 2611 } 2612 2613 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 2614 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2615 Action.Enter(CGF); 2616 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2617 }; 2618 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2619 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart()); 2620 } 2621 2622 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 2623 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2624 Action.Enter(CGF); 2625 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2626 }; 2627 Expr *Hint = nullptr; 2628 if (auto *HintClause = S.getSingleClause<OMPHintClause>()) 2629 Hint = HintClause->getHint(); 2630 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 2631 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 2632 S.getDirectiveName().getAsString(), 2633 CodeGen, S.getLocStart(), Hint); 2634 } 2635 2636 void CodeGenFunction::EmitOMPParallelForDirective( 2637 const OMPParallelForDirective &S) { 2638 // Emit directive as a combined directive that consists of two implicit 2639 // directives: 'parallel' with 'for' directive. 2640 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2641 OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel()); 2642 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 2643 emitDispatchForLoopBounds); 2644 }; 2645 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen, 2646 emitEmptyBoundParameters); 2647 } 2648 2649 void CodeGenFunction::EmitOMPParallelForSimdDirective( 2650 const OMPParallelForSimdDirective &S) { 2651 // Emit directive as a combined directive that consists of two implicit 2652 // directives: 'parallel' with 'for' directive. 
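  // Illustratively, '#pragma omp parallel for simd' is emitted as if it were a
  // 'parallel' region whose body is a 'for simd' worksharing loop; the loop
  // itself is produced by EmitOMPWorksharingLoop below.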
2653 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2654 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds, 2655 emitDispatchForLoopBounds); 2656 }; 2657 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen, 2658 emitEmptyBoundParameters); 2659 } 2660 2661 void CodeGenFunction::EmitOMPParallelSectionsDirective( 2662 const OMPParallelSectionsDirective &S) { 2663 // Emit directive as a combined directive that consists of two implicit 2664 // directives: 'parallel' with 'sections' directive. 2665 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 2666 CGF.EmitSections(S); 2667 }; 2668 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen, 2669 emitEmptyBoundParameters); 2670 } 2671 2672 void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S, 2673 const RegionCodeGenTy &BodyGen, 2674 const TaskGenTy &TaskGen, 2675 OMPTaskDataTy &Data) { 2676 // Emit outlined function for task construct. 2677 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 2678 auto *I = CS->getCapturedDecl()->param_begin(); 2679 auto *PartId = std::next(I); 2680 auto *TaskT = std::next(I, 4); 2681 // Check if the task is final 2682 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 2683 // If the condition constant folds and can be elided, try to avoid emitting 2684 // the condition and the dead arm of the if/else. 2685 auto *Cond = Clause->getCondition(); 2686 bool CondConstant; 2687 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 2688 Data.Final.setInt(CondConstant); 2689 else 2690 Data.Final.setPointer(EvaluateExprAsBool(Cond)); 2691 } else { 2692 // By default the task is not final. 2693 Data.Final.setInt(/*IntVal=*/false); 2694 } 2695 // Check if the task has 'priority' clause. 2696 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) { 2697 auto *Prio = Clause->getPriority(); 2698 Data.Priority.setInt(/*IntVal=*/true); 2699 Data.Priority.setPointer(EmitScalarConversion( 2700 EmitScalarExpr(Prio), Prio->getType(), 2701 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1), 2702 Prio->getExprLoc())); 2703 } 2704 // The first function argument for tasks is a thread id, the second one is a 2705 // part id (0 for tied tasks, >=0 for untied task). 2706 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 2707 // Get list of private variables. 2708 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 2709 auto IRef = C->varlist_begin(); 2710 for (auto *IInit : C->private_copies()) { 2711 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 2712 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 2713 Data.PrivateVars.push_back(*IRef); 2714 Data.PrivateCopies.push_back(IInit); 2715 } 2716 ++IRef; 2717 } 2718 } 2719 EmittedAsPrivate.clear(); 2720 // Get list of firstprivate variables. 2721 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 2722 auto IRef = C->varlist_begin(); 2723 auto IElemInitRef = C->inits().begin(); 2724 for (auto *IInit : C->private_copies()) { 2725 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 2726 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 2727 Data.FirstprivateVars.push_back(*IRef); 2728 Data.FirstprivateCopies.push_back(IInit); 2729 Data.FirstprivateInits.push_back(*IElemInitRef); 2730 } 2731 ++IRef; 2732 ++IElemInitRef; 2733 } 2734 } 2735 // Get list of lastprivate variables (for taskloops). 
2736 llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs; 2737 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) { 2738 auto IRef = C->varlist_begin(); 2739 auto ID = C->destination_exprs().begin(); 2740 for (auto *IInit : C->private_copies()) { 2741 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 2742 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 2743 Data.LastprivateVars.push_back(*IRef); 2744 Data.LastprivateCopies.push_back(IInit); 2745 } 2746 LastprivateDstsOrigs.insert( 2747 {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()), 2748 cast<DeclRefExpr>(*IRef)}); 2749 ++IRef; 2750 ++ID; 2751 } 2752 } 2753 SmallVector<const Expr *, 4> LHSs; 2754 SmallVector<const Expr *, 4> RHSs; 2755 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) { 2756 auto IPriv = C->privates().begin(); 2757 auto IRed = C->reduction_ops().begin(); 2758 auto ILHS = C->lhs_exprs().begin(); 2759 auto IRHS = C->rhs_exprs().begin(); 2760 for (const auto *Ref : C->varlists()) { 2761 Data.ReductionVars.emplace_back(Ref); 2762 Data.ReductionCopies.emplace_back(*IPriv); 2763 Data.ReductionOps.emplace_back(*IRed); 2764 LHSs.emplace_back(*ILHS); 2765 RHSs.emplace_back(*IRHS); 2766 std::advance(IPriv, 1); 2767 std::advance(IRed, 1); 2768 std::advance(ILHS, 1); 2769 std::advance(IRHS, 1); 2770 } 2771 } 2772 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit( 2773 *this, S.getLocStart(), LHSs, RHSs, Data); 2774 // Build list of dependences. 2775 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) 2776 for (auto *IRef : C->varlists()) 2777 Data.Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef)); 2778 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs]( 2779 CodeGenFunction &CGF, PrePostActionTy &Action) { 2780 // Set proper addresses for generated private copies. 2781 OMPPrivateScope Scope(CGF); 2782 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() || 2783 !Data.LastprivateVars.empty()) { 2784 enum { PrivatesParam = 2, CopyFnParam = 3 }; 2785 auto *CopyFn = CGF.Builder.CreateLoad( 2786 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3))); 2787 auto *PrivatesPtr = CGF.Builder.CreateLoad( 2788 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2))); 2789 // Map privates. 
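      // A sketch of the protocol used below (names illustrative): the task's
      // copy function is invoked as
      //   copy_fn(privates_ptr, &x.priv.ptr.addr, &y.firstpriv.ptr.addr, ...)
      // and fills each ".priv.ptr.addr"-style temporary with the address of
      // the corresponding private copy inside the task's privates block; those
      // addresses are then loaded and registered in the privatization scope.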
2790 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs; 2791 llvm::SmallVector<llvm::Value *, 16> CallArgs; 2792 CallArgs.push_back(PrivatesPtr); 2793 for (auto *E : Data.PrivateVars) { 2794 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2795 Address PrivatePtr = CGF.CreateMemTemp( 2796 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr"); 2797 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 2798 CallArgs.push_back(PrivatePtr.getPointer()); 2799 } 2800 for (auto *E : Data.FirstprivateVars) { 2801 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2802 Address PrivatePtr = 2803 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 2804 ".firstpriv.ptr.addr"); 2805 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 2806 CallArgs.push_back(PrivatePtr.getPointer()); 2807 } 2808 for (auto *E : Data.LastprivateVars) { 2809 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2810 Address PrivatePtr = 2811 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()), 2812 ".lastpriv.ptr.addr"); 2813 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 2814 CallArgs.push_back(PrivatePtr.getPointer()); 2815 } 2816 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(), 2817 CopyFn, CallArgs); 2818 for (auto &&Pair : LastprivateDstsOrigs) { 2819 auto *OrigVD = cast<VarDecl>(Pair.second->getDecl()); 2820 DeclRefExpr DRE( 2821 const_cast<VarDecl *>(OrigVD), 2822 /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup( 2823 OrigVD) != nullptr, 2824 Pair.second->getType(), VK_LValue, Pair.second->getExprLoc()); 2825 Scope.addPrivate(Pair.first, [&CGF, &DRE]() { 2826 return CGF.EmitLValue(&DRE).getAddress(); 2827 }); 2828 } 2829 for (auto &&Pair : PrivatePtrs) { 2830 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 2831 CGF.getContext().getDeclAlign(Pair.first)); 2832 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 2833 } 2834 } 2835 if (Data.Reductions) { 2836 OMPLexicalScope LexScope(CGF, S, /*AsInlined=*/true); 2837 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionCopies, 2838 Data.ReductionOps); 2839 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad( 2840 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9))); 2841 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) { 2842 RedCG.emitSharedLValue(CGF, Cnt); 2843 RedCG.emitAggregateType(CGF, Cnt); 2844 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem( 2845 CGF, S.getLocStart(), ReductionsPtr, RedCG.getSharedLValue(Cnt)); 2846 Replacement = 2847 Address(CGF.EmitScalarConversion( 2848 Replacement.getPointer(), CGF.getContext().VoidPtrTy, 2849 CGF.getContext().getPointerType( 2850 Data.ReductionCopies[Cnt]->getType()), 2851 SourceLocation()), 2852 Replacement.getAlignment()); 2853 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement); 2854 Scope.addPrivate(RedCG.getBaseDecl(Cnt), 2855 [Replacement]() { return Replacement; }); 2856 // FIXME: This must removed once the runtime library is fixed. 2857 // Emit required threadprivate variables for 2858 // initilizer/combiner/finalizer. 2859 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(), 2860 RedCG, Cnt); 2861 } 2862 } 2863 // Privatize all private variables except for in_reduction items. 
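    // (Note: in_reduction items are handled separately in InRedScope further
    // down, because their taskgroup reduction descriptors are implicit
    // firstprivates and only become usable once this privatization has run.)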
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const auto *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicit firstprivate
        // and privatized already during processing of the firstprivates.
        llvm::Value *ReductionsPtr = CGF.EmitLoadOfScalar(
            CGF.EmitLValue(TaskgroupDescriptors[Cnt]), SourceLocation());
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getLocStart(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                SourceLocation()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
                              [Replacement]() { return Replacement; });
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
                                                           RedCG, Cnt);
      }
    }
    (void)InRedScope.Privatize();

    Action.Enter(CGF);
    BodyGen(CGF);
  };
  auto *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S);
  TaskGen(*this, OutlinedFn, Data);
}

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if we should emit tied or untied task.
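  // E.g. '#pragma omp task untied' yields Data.Tied == false; without the
  // 'untied' clause the task is emitted as tied.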
2941 Data.Tied = !S.getSingleClause<OMPUntiedClause>(); 2942 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 2943 CGF.EmitStmt(CS->getCapturedStmt()); 2944 }; 2945 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 2946 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn, 2947 const OMPTaskDataTy &Data) { 2948 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn, 2949 SharedsTy, CapturedStruct, IfCond, 2950 Data); 2951 }; 2952 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data); 2953 } 2954 2955 void CodeGenFunction::EmitOMPTaskyieldDirective( 2956 const OMPTaskyieldDirective &S) { 2957 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart()); 2958 } 2959 2960 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 2961 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier); 2962 } 2963 2964 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 2965 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart()); 2966 } 2967 2968 void CodeGenFunction::EmitOMPTaskgroupDirective( 2969 const OMPTaskgroupDirective &S) { 2970 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 2971 Action.Enter(CGF); 2972 if (const Expr *E = S.getReductionRef()) { 2973 SmallVector<const Expr *, 4> LHSs; 2974 SmallVector<const Expr *, 4> RHSs; 2975 OMPTaskDataTy Data; 2976 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) { 2977 auto IPriv = C->privates().begin(); 2978 auto IRed = C->reduction_ops().begin(); 2979 auto ILHS = C->lhs_exprs().begin(); 2980 auto IRHS = C->rhs_exprs().begin(); 2981 for (const auto *Ref : C->varlists()) { 2982 Data.ReductionVars.emplace_back(Ref); 2983 Data.ReductionCopies.emplace_back(*IPriv); 2984 Data.ReductionOps.emplace_back(*IRed); 2985 LHSs.emplace_back(*ILHS); 2986 RHSs.emplace_back(*IRHS); 2987 std::advance(IPriv, 1); 2988 std::advance(IRed, 1); 2989 std::advance(ILHS, 1); 2990 std::advance(IRHS, 1); 2991 } 2992 } 2993 llvm::Value *ReductionDesc = 2994 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getLocStart(), 2995 LHSs, RHSs, Data); 2996 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 2997 CGF.EmitVarDecl(*VD); 2998 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD), 2999 /*Volatile=*/false, E->getType()); 3000 } 3001 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 3002 }; 3003 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 3004 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart()); 3005 } 3006 3007 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 3008 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> { 3009 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) { 3010 return llvm::makeArrayRef(FlushClause->varlist_begin(), 3011 FlushClause->varlist_end()); 3012 } 3013 return llvm::None; 3014 }(), S.getLocStart()); 3015 } 3016 3017 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S, 3018 const CodeGenLoopTy &CodeGenLoop, 3019 Expr *IncExpr) { 3020 // Emit the loop iteration variable. 3021 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 3022 auto IVDecl = cast<VarDecl>(IVExpr->getDecl()); 3023 EmitVarDecl(*IVDecl); 3024 3025 // Emit the iterations count variable. 3026 // If it is not a variable, Sema decided to calculate iterations count on each 3027 // iteration (e.g., it is foldable into a constant). 
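  // E.g. for 'for (int i = 0; i < N; i += 2)' Sema precomputes the iteration
  // count (roughly (N + 1) / 2) into a dedicated variable emitted here.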
3028 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 3029 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 3030 // Emit calculation of the iterations count. 3031 EmitIgnoredExpr(S.getCalcLastIteration()); 3032 } 3033 3034 auto &RT = CGM.getOpenMPRuntime(); 3035 3036 bool HasLastprivateClause = false; 3037 // Check pre-condition. 3038 { 3039 OMPLoopScope PreInitScope(*this, S); 3040 // Skip the entire loop if we don't meet the precondition. 3041 // If the condition constant folds and can be elided, avoid emitting the 3042 // whole loop. 3043 bool CondConstant; 3044 llvm::BasicBlock *ContBlock = nullptr; 3045 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 3046 if (!CondConstant) 3047 return; 3048 } else { 3049 auto *ThenBlock = createBasicBlock("omp.precond.then"); 3050 ContBlock = createBasicBlock("omp.precond.end"); 3051 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 3052 getProfileCount(&S)); 3053 EmitBlock(ThenBlock); 3054 incrementProfileCounter(&S); 3055 } 3056 3057 // Emit 'then' code. 3058 { 3059 // Emit helper vars inits. 3060 3061 LValue LB = EmitOMPHelperVar( 3062 *this, cast<DeclRefExpr>( 3063 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3064 ? S.getCombinedLowerBoundVariable() 3065 : S.getLowerBoundVariable()))); 3066 LValue UB = EmitOMPHelperVar( 3067 *this, cast<DeclRefExpr>( 3068 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3069 ? S.getCombinedUpperBoundVariable() 3070 : S.getUpperBoundVariable()))); 3071 LValue ST = 3072 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 3073 LValue IL = 3074 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 3075 3076 OMPPrivateScope LoopScope(*this); 3077 if (EmitOMPFirstprivateClause(S, LoopScope)) { 3078 // Emit implicit barrier to synchronize threads and avoid data races on 3079 // initialization of firstprivate variables and post-update of 3080 // lastprivate variables. 3081 CGM.getOpenMPRuntime().emitBarrierCall( 3082 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false, 3083 /*ForceSimpleCall=*/true); 3084 } 3085 EmitOMPPrivateClause(S, LoopScope); 3086 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 3087 EmitOMPPrivateLoopCounters(S, LoopScope); 3088 (void)LoopScope.Privatize(); 3089 3090 // Detect the distribute schedule kind and chunk. 3091 llvm::Value *Chunk = nullptr; 3092 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown; 3093 if (auto *C = S.getSingleClause<OMPDistScheduleClause>()) { 3094 ScheduleKind = C->getDistScheduleKind(); 3095 if (const auto *Ch = C->getChunkSize()) { 3096 Chunk = EmitScalarExpr(Ch); 3097 Chunk = EmitScalarConversion(Chunk, Ch->getType(), 3098 S.getIterationVariable()->getType(), 3099 S.getLocStart()); 3100 } 3101 } 3102 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 3103 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 3104 3105 // OpenMP [2.10.8, distribute Construct, Description] 3106 // If dist_schedule is specified, kind must be static. If specified, 3107 // iterations are divided into chunks of size chunk_size, chunks are 3108 // assigned to the teams of the league in a round-robin fashion in the 3109 // order of the team number. When no chunk_size is specified, the 3110 // iteration space is divided into chunks that are approximately equal 3111 // in size, and at most one chunk is distributed to each team of the 3112 // league. The size of the chunks is unspecified in this case. 
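      // E.g. 'dist_schedule(static)' without a chunk takes the branch below
      // (a single statically computed chunk per team), while
      // 'dist_schedule(static, <chunk>)' goes through
      // EmitOMPDistributeOuterLoop to request chunks from the runtime.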
3113 if (RT.isStaticNonchunked(ScheduleKind, 3114 /* Chunked */ Chunk != nullptr)) { 3115 CGOpenMPRuntime::StaticRTInput StaticInit( 3116 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(), 3117 LB.getAddress(), UB.getAddress(), ST.getAddress()); 3118 RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, 3119 StaticInit); 3120 auto LoopExit = 3121 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 3122 // UB = min(UB, GlobalUB); 3123 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3124 ? S.getCombinedEnsureUpperBound() 3125 : S.getEnsureUpperBound()); 3126 // IV = LB; 3127 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3128 ? S.getCombinedInit() 3129 : S.getInit()); 3130 3131 Expr *Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()) 3132 ? S.getCombinedCond() 3133 : S.getCond(); 3134 3135 // for distribute alone, codegen 3136 // while (idx <= UB) { BODY; ++idx; } 3137 // when combined with 'for' (e.g. as in 'distribute parallel for') 3138 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; } 3139 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr, 3140 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) { 3141 CodeGenLoop(CGF, S, LoopExit); 3142 }, 3143 [](CodeGenFunction &) {}); 3144 EmitBlock(LoopExit.getBlock()); 3145 // Tell the runtime we are done. 3146 RT.emitForStaticFinish(*this, S.getLocStart(), S.getDirectiveKind()); 3147 } else { 3148 // Emit the outer loop, which requests its work chunk [LB..UB] from 3149 // runtime and runs the inner loop to process it. 3150 const OMPLoopArguments LoopArguments = { 3151 LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(), 3152 Chunk}; 3153 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments, 3154 CodeGenLoop); 3155 } 3156 3157 // Emit final copy of the lastprivate variables if IsLastIter != 0. 3158 if (HasLastprivateClause) 3159 EmitOMPLastprivateClauseFinal( 3160 S, /*NoFinals=*/false, 3161 Builder.CreateIsNotNull( 3162 EmitLoadOfScalar(IL, S.getLocStart()))); 3163 } 3164 3165 // We're now done with the loop, so jump to the continuation block. 
3166 if (ContBlock) { 3167 EmitBranch(ContBlock); 3168 EmitBlock(ContBlock, true); 3169 } 3170 } 3171 } 3172 3173 void CodeGenFunction::EmitOMPDistributeDirective( 3174 const OMPDistributeDirective &S) { 3175 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3176 3177 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc()); 3178 }; 3179 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 3180 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen, 3181 false); 3182 } 3183 3184 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 3185 const CapturedStmt *S) { 3186 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 3187 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 3188 CGF.CapturedStmtInfo = &CapStmtInfo; 3189 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S); 3190 Fn->addFnAttr(llvm::Attribute::NoInline); 3191 return Fn; 3192 } 3193 3194 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 3195 if (!S.getAssociatedStmt()) { 3196 for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) 3197 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC); 3198 return; 3199 } 3200 auto *C = S.getSingleClause<OMPSIMDClause>(); 3201 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF, 3202 PrePostActionTy &Action) { 3203 if (C) { 3204 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 3205 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 3206 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 3207 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS); 3208 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(), 3209 OutlinedFn, CapturedVars); 3210 } else { 3211 Action.Enter(CGF); 3212 CGF.EmitStmt( 3213 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 3214 } 3215 }; 3216 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 3217 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C); 3218 } 3219 3220 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 3221 QualType SrcType, QualType DestType, 3222 SourceLocation Loc) { 3223 assert(CGF.hasScalarEvaluationKind(DestType) && 3224 "DestType must have scalar evaluation kind."); 3225 assert(!Val.isAggregate() && "Must be a scalar or complex."); 3226 return Val.isScalar() 3227 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType, 3228 Loc) 3229 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType, 3230 DestType, Loc); 3231 } 3232 3233 static CodeGenFunction::ComplexPairTy 3234 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 3235 QualType DestType, SourceLocation Loc) { 3236 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 3237 "DestType must have complex evaluation kind."); 3238 CodeGenFunction::ComplexPairTy ComplexVal; 3239 if (Val.isScalar()) { 3240 // Convert the input element to the element type of the complex. 
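    // E.g. converting an 'int' source to '_Complex double' produces the
    // converted scalar as the real part and a zero imaginary part.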
3241 auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); 3242 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 3243 DestElementType, Loc); 3244 ComplexVal = CodeGenFunction::ComplexPairTy( 3245 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 3246 } else { 3247 assert(Val.isComplex() && "Must be a scalar or complex."); 3248 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 3249 auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); 3250 ComplexVal.first = CGF.EmitScalarConversion( 3251 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 3252 ComplexVal.second = CGF.EmitScalarConversion( 3253 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 3254 } 3255 return ComplexVal; 3256 } 3257 3258 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst, 3259 LValue LVal, RValue RVal) { 3260 if (LVal.isGlobalReg()) { 3261 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 3262 } else { 3263 CGF.EmitAtomicStore(RVal, LVal, 3264 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent 3265 : llvm::AtomicOrdering::Monotonic, 3266 LVal.isVolatile(), /*IsInit=*/false); 3267 } 3268 } 3269 3270 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal, 3271 QualType RValTy, SourceLocation Loc) { 3272 switch (getEvaluationKind(LVal.getType())) { 3273 case TEK_Scalar: 3274 EmitStoreThroughLValue(RValue::get(convertToScalarValue( 3275 *this, RVal, RValTy, LVal.getType(), Loc)), 3276 LVal); 3277 break; 3278 case TEK_Complex: 3279 EmitStoreOfComplex( 3280 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal, 3281 /*isInit=*/false); 3282 break; 3283 case TEK_Aggregate: 3284 llvm_unreachable("Must be a scalar or complex."); 3285 } 3286 } 3287 3288 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst, 3289 const Expr *X, const Expr *V, 3290 SourceLocation Loc) { 3291 // v = x; 3292 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 3293 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 3294 LValue XLValue = CGF.EmitLValue(X); 3295 LValue VLValue = CGF.EmitLValue(V); 3296 RValue Res = XLValue.isGlobalReg() 3297 ? CGF.EmitLoadOfLValue(XLValue, Loc) 3298 : CGF.EmitAtomicLoad( 3299 XLValue, Loc, 3300 IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent 3301 : llvm::AtomicOrdering::Monotonic, 3302 XLValue.isVolatile()); 3303 // OpenMP, 2.12.6, atomic Construct 3304 // Any atomic construct with a seq_cst clause forces the atomically 3305 // performed operation to include an implicit flush operation without a 3306 // list. 3307 if (IsSeqCst) 3308 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 3309 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc); 3310 } 3311 3312 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst, 3313 const Expr *X, const Expr *E, 3314 SourceLocation Loc) { 3315 // x = expr; 3316 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 3317 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 3318 // OpenMP, 2.12.6, atomic Construct 3319 // Any atomic construct with a seq_cst clause forces the atomically 3320 // performed operation to include an implicit flush operation without a 3321 // list. 
3322 if (IsSeqCst) 3323 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 3324 } 3325 3326 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 3327 RValue Update, 3328 BinaryOperatorKind BO, 3329 llvm::AtomicOrdering AO, 3330 bool IsXLHSInRHSPart) { 3331 auto &Context = CGF.CGM.getContext(); 3332 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 3333 // expression is simple and atomic is allowed for the given type for the 3334 // target platform. 3335 if (BO == BO_Comma || !Update.isScalar() || 3336 !Update.getScalarVal()->getType()->isIntegerTy() || 3337 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 3338 (Update.getScalarVal()->getType() != 3339 X.getAddress().getElementType())) || 3340 !X.getAddress().getElementType()->isIntegerTy() || 3341 !Context.getTargetInfo().hasBuiltinAtomic( 3342 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 3343 return std::make_pair(false, RValue::get(nullptr)); 3344 3345 llvm::AtomicRMWInst::BinOp RMWOp; 3346 switch (BO) { 3347 case BO_Add: 3348 RMWOp = llvm::AtomicRMWInst::Add; 3349 break; 3350 case BO_Sub: 3351 if (!IsXLHSInRHSPart) 3352 return std::make_pair(false, RValue::get(nullptr)); 3353 RMWOp = llvm::AtomicRMWInst::Sub; 3354 break; 3355 case BO_And: 3356 RMWOp = llvm::AtomicRMWInst::And; 3357 break; 3358 case BO_Or: 3359 RMWOp = llvm::AtomicRMWInst::Or; 3360 break; 3361 case BO_Xor: 3362 RMWOp = llvm::AtomicRMWInst::Xor; 3363 break; 3364 case BO_LT: 3365 RMWOp = X.getType()->hasSignedIntegerRepresentation() 3366 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 3367 : llvm::AtomicRMWInst::Max) 3368 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 3369 : llvm::AtomicRMWInst::UMax); 3370 break; 3371 case BO_GT: 3372 RMWOp = X.getType()->hasSignedIntegerRepresentation() 3373 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 3374 : llvm::AtomicRMWInst::Min) 3375 : (IsXLHSInRHSPart ? 
llvm::AtomicRMWInst::UMax 3376 : llvm::AtomicRMWInst::UMin); 3377 break; 3378 case BO_Assign: 3379 RMWOp = llvm::AtomicRMWInst::Xchg; 3380 break; 3381 case BO_Mul: 3382 case BO_Div: 3383 case BO_Rem: 3384 case BO_Shl: 3385 case BO_Shr: 3386 case BO_LAnd: 3387 case BO_LOr: 3388 return std::make_pair(false, RValue::get(nullptr)); 3389 case BO_PtrMemD: 3390 case BO_PtrMemI: 3391 case BO_LE: 3392 case BO_GE: 3393 case BO_EQ: 3394 case BO_NE: 3395 case BO_AddAssign: 3396 case BO_SubAssign: 3397 case BO_AndAssign: 3398 case BO_OrAssign: 3399 case BO_XorAssign: 3400 case BO_MulAssign: 3401 case BO_DivAssign: 3402 case BO_RemAssign: 3403 case BO_ShlAssign: 3404 case BO_ShrAssign: 3405 case BO_Comma: 3406 llvm_unreachable("Unsupported atomic update operation"); 3407 } 3408 auto *UpdateVal = Update.getScalarVal(); 3409 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 3410 UpdateVal = CGF.Builder.CreateIntCast( 3411 IC, X.getAddress().getElementType(), 3412 X.getType()->hasSignedIntegerRepresentation()); 3413 } 3414 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO); 3415 return std::make_pair(true, RValue::get(Res)); 3416 } 3417 3418 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 3419 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 3420 llvm::AtomicOrdering AO, SourceLocation Loc, 3421 const llvm::function_ref<RValue(RValue)> &CommonGen) { 3422 // Update expressions are allowed to have the following forms: 3423 // x binop= expr; -> xrval + expr; 3424 // x++, ++x -> xrval + 1; 3425 // x--, --x -> xrval - 1; 3426 // x = x binop expr; -> xrval binop expr 3427 // x = expr Op x; - > expr binop xrval; 3428 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 3429 if (!Res.first) { 3430 if (X.isGlobalReg()) { 3431 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 3432 // 'xrval'. 3433 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 3434 } else { 3435 // Perform compare-and-swap procedure. 3436 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 3437 } 3438 } 3439 return Res; 3440 } 3441 3442 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst, 3443 const Expr *X, const Expr *E, 3444 const Expr *UE, bool IsXLHSInRHSPart, 3445 SourceLocation Loc) { 3446 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 3447 "Update expr in 'atomic update' must be a binary operator."); 3448 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 3449 // Update expressions are allowed to have the following forms: 3450 // x binop= expr; -> xrval + expr; 3451 // x++, ++x -> xrval + 1; 3452 // x--, --x -> xrval - 1; 3453 // x = x binop expr; -> xrval binop expr 3454 // x = expr Op x; - > expr binop xrval; 3455 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 3456 LValue XLValue = CGF.EmitLValue(X); 3457 RValue ExprRValue = CGF.EmitAnyExpr(E); 3458 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent 3459 : llvm::AtomicOrdering::Monotonic; 3460 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 3461 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 3462 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 3463 auto *ERValExpr = IsXLHSInRHSPart ? 
RHS : LHS; 3464 auto Gen = 3465 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue { 3466 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 3467 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 3468 return CGF.EmitAnyExpr(UE); 3469 }; 3470 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 3471 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 3472 // OpenMP, 2.12.6, atomic Construct 3473 // Any atomic construct with a seq_cst clause forces the atomically 3474 // performed operation to include an implicit flush operation without a 3475 // list. 3476 if (IsSeqCst) 3477 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 3478 } 3479 3480 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 3481 QualType SourceType, QualType ResType, 3482 SourceLocation Loc) { 3483 switch (CGF.getEvaluationKind(ResType)) { 3484 case TEK_Scalar: 3485 return RValue::get( 3486 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 3487 case TEK_Complex: { 3488 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 3489 return RValue::getComplex(Res.first, Res.second); 3490 } 3491 case TEK_Aggregate: 3492 break; 3493 } 3494 llvm_unreachable("Must be a scalar or complex."); 3495 } 3496 3497 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst, 3498 bool IsPostfixUpdate, const Expr *V, 3499 const Expr *X, const Expr *E, 3500 const Expr *UE, bool IsXLHSInRHSPart, 3501 SourceLocation Loc) { 3502 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 3503 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 3504 RValue NewVVal; 3505 LValue VLValue = CGF.EmitLValue(V); 3506 LValue XLValue = CGF.EmitLValue(X); 3507 RValue ExprRValue = CGF.EmitAnyExpr(E); 3508 auto AO = IsSeqCst ? llvm::AtomicOrdering::SequentiallyConsistent 3509 : llvm::AtomicOrdering::Monotonic; 3510 QualType NewVValType; 3511 if (UE) { 3512 // 'x' is updated with some additional value. 3513 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 3514 "Update expr in 'atomic capture' must be a binary operator."); 3515 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 3516 // Update expressions are allowed to have the following forms: 3517 // x binop= expr; -> xrval + expr; 3518 // x++, ++x -> xrval + 1; 3519 // x--, --x -> xrval - 1; 3520 // x = x binop expr; -> xrval binop expr 3521 // x = expr Op x; - > expr binop xrval; 3522 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 3523 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 3524 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 3525 NewVValType = XRValExpr->getType(); 3526 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 3527 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 3528 IsPostfixUpdate](RValue XRValue) -> RValue { 3529 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 3530 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 3531 RValue Res = CGF.EmitAnyExpr(UE); 3532 NewVVal = IsPostfixUpdate ? XRValue : Res; 3533 return Res; 3534 }; 3535 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 3536 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 3537 if (Res.first) { 3538 // 'atomicrmw' instruction was generated. 3539 if (IsPostfixUpdate) { 3540 // Use old value from 'atomicrmw'. 3541 NewVVal = Res.second; 3542 } else { 3543 // 'atomicrmw' does not provide new value, so evaluate it using old 3544 // value of 'x'. 
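        // E.g. for 'v = ++x;' the atomicrmw returned the old value of 'x', so
        // the update expression is re-evaluated with that old value to obtain
        // the new value that must be stored to 'v'.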
3545 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 3546 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 3547 NewVVal = CGF.EmitAnyExpr(UE); 3548 } 3549 } 3550 } else { 3551 // 'x' is simply rewritten with some 'expr'. 3552 NewVValType = X->getType().getNonReferenceType(); 3553 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 3554 X->getType().getNonReferenceType(), Loc); 3555 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) -> RValue { 3556 NewVVal = XRValue; 3557 return ExprRValue; 3558 }; 3559 // Try to perform atomicrmw xchg, otherwise simple exchange. 3560 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 3561 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 3562 Loc, Gen); 3563 if (Res.first) { 3564 // 'atomicrmw' instruction was generated. 3565 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 3566 } 3567 } 3568 // Emit post-update store to 'v' of old/new 'x' value. 3569 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc); 3570 // OpenMP, 2.12.6, atomic Construct 3571 // Any atomic construct with a seq_cst clause forces the atomically 3572 // performed operation to include an implicit flush operation without a 3573 // list. 3574 if (IsSeqCst) 3575 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 3576 } 3577 3578 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 3579 bool IsSeqCst, bool IsPostfixUpdate, 3580 const Expr *X, const Expr *V, const Expr *E, 3581 const Expr *UE, bool IsXLHSInRHSPart, 3582 SourceLocation Loc) { 3583 switch (Kind) { 3584 case OMPC_read: 3585 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc); 3586 break; 3587 case OMPC_write: 3588 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc); 3589 break; 3590 case OMPC_unknown: 3591 case OMPC_update: 3592 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc); 3593 break; 3594 case OMPC_capture: 3595 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE, 3596 IsXLHSInRHSPart, Loc); 3597 break; 3598 case OMPC_if: 3599 case OMPC_final: 3600 case OMPC_num_threads: 3601 case OMPC_private: 3602 case OMPC_firstprivate: 3603 case OMPC_lastprivate: 3604 case OMPC_reduction: 3605 case OMPC_task_reduction: 3606 case OMPC_in_reduction: 3607 case OMPC_safelen: 3608 case OMPC_simdlen: 3609 case OMPC_collapse: 3610 case OMPC_default: 3611 case OMPC_seq_cst: 3612 case OMPC_shared: 3613 case OMPC_linear: 3614 case OMPC_aligned: 3615 case OMPC_copyin: 3616 case OMPC_copyprivate: 3617 case OMPC_flush: 3618 case OMPC_proc_bind: 3619 case OMPC_schedule: 3620 case OMPC_ordered: 3621 case OMPC_nowait: 3622 case OMPC_untied: 3623 case OMPC_threadprivate: 3624 case OMPC_depend: 3625 case OMPC_mergeable: 3626 case OMPC_device: 3627 case OMPC_threads: 3628 case OMPC_simd: 3629 case OMPC_map: 3630 case OMPC_num_teams: 3631 case OMPC_thread_limit: 3632 case OMPC_priority: 3633 case OMPC_grainsize: 3634 case OMPC_nogroup: 3635 case OMPC_num_tasks: 3636 case OMPC_hint: 3637 case OMPC_dist_schedule: 3638 case OMPC_defaultmap: 3639 case OMPC_uniform: 3640 case OMPC_to: 3641 case OMPC_from: 3642 case OMPC_use_device_ptr: 3643 case OMPC_is_device_ptr: 3644 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 3645 } 3646 } 3647 3648 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 3649 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>(); 3650 OpenMPClauseKind Kind = OMPC_unknown; 3651 for (auto *C : S.clauses()) { 3652 // Find first clause (skip seq_cst clause, if it is 
first). 3653 if (C->getClauseKind() != OMPC_seq_cst) { 3654 Kind = C->getClauseKind(); 3655 break; 3656 } 3657 } 3658 3659 const auto *CS = 3660 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); 3661 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) { 3662 enterFullExpression(EWC); 3663 } 3664 // Processing for statements under 'atomic capture'. 3665 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) { 3666 for (const auto *C : Compound->body()) { 3667 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) { 3668 enterFullExpression(EWC); 3669 } 3670 } 3671 } 3672 3673 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF, 3674 PrePostActionTy &) { 3675 CGF.EmitStopPoint(CS); 3676 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(), 3677 S.getV(), S.getExpr(), S.getUpdateExpr(), 3678 S.isXLHSInRHSPart(), S.getLocStart()); 3679 }; 3680 OMPLexicalScope Scope(*this, S, /*AsInlined=*/true); 3681 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 3682 } 3683 3684 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF, 3685 const OMPExecutableDirective &S, 3686 const RegionCodeGenTy &CodeGen) { 3687 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind())); 3688 CodeGenModule &CGM = CGF.CGM; 3689 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt()); 3690 3691 llvm::Function *Fn = nullptr; 3692 llvm::Constant *FnID = nullptr; 3693 3694 const Expr *IfCond = nullptr; 3695 // Check for the at most one if clause associated with the target region. 3696 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 3697 if (C->getNameModifier() == OMPD_unknown || 3698 C->getNameModifier() == OMPD_target) { 3699 IfCond = C->getCondition(); 3700 break; 3701 } 3702 } 3703 3704 // Check if we have any device clause associated with the directive. 3705 const Expr *Device = nullptr; 3706 if (auto *C = S.getSingleClause<OMPDeviceClause>()) { 3707 Device = C->getDevice(); 3708 } 3709 3710 // Check if we have an if clause whose conditional always evaluates to false 3711 // or if we do not have any targets specified. If so the target region is not 3712 // an offload entry point. 3713 bool IsOffloadEntry = true; 3714 if (IfCond) { 3715 bool Val; 3716 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 3717 IsOffloadEntry = false; 3718 } 3719 if (CGM.getLangOpts().OMPTargetTriples.empty()) 3720 IsOffloadEntry = false; 3721 3722 assert(CGF.CurFuncDecl && "No parent declaration for target region!"); 3723 StringRef ParentName; 3724 // In case we have Ctors/Dtors we use the complete type variant to produce 3725 // the mangling of the device outlined kernel. 3726 if (auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl)) 3727 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete)); 3728 else if (auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl)) 3729 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete)); 3730 else 3731 ParentName = 3732 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl))); 3733 3734 // Emit target region as a standalone region. 
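  // Roughly, the device-side outlined function (and its offload entry) is
  // emitted first, and emitTargetCall then emits the host-side offloading
  // call, falling back to the host version of the outlined function when
  // offloading is not possible or the 'if' clause evaluates to false.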
3735 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID, 3736 IsOffloadEntry, CodeGen); 3737 OMPLexicalScope Scope(CGF, S); 3738 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 3739 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars); 3740 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device, 3741 CapturedVars); 3742 } 3743 3744 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S, 3745 PrePostActionTy &Action) { 3746 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 3747 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 3748 CGF.EmitOMPPrivateClause(S, PrivateScope); 3749 (void)PrivateScope.Privatize(); 3750 3751 Action.Enter(CGF); 3752 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 3753 } 3754 3755 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM, 3756 StringRef ParentName, 3757 const OMPTargetDirective &S) { 3758 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3759 emitTargetRegion(CGF, S, Action); 3760 }; 3761 llvm::Function *Fn; 3762 llvm::Constant *Addr; 3763 // Emit target region as a standalone region. 3764 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 3765 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 3766 assert(Fn && Addr && "Target device function emission failed."); 3767 } 3768 3769 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 3770 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3771 emitTargetRegion(CGF, S, Action); 3772 }; 3773 emitCommonOMPTargetDirective(*this, S, CodeGen); 3774 } 3775 3776 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF, 3777 const OMPExecutableDirective &S, 3778 OpenMPDirectiveKind InnermostKind, 3779 const RegionCodeGenTy &CodeGen) { 3780 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams); 3781 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction( 3782 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 3783 3784 const OMPNumTeamsClause *NT = S.getSingleClause<OMPNumTeamsClause>(); 3785 const OMPThreadLimitClause *TL = S.getSingleClause<OMPThreadLimitClause>(); 3786 if (NT || TL) { 3787 Expr *NumTeams = (NT) ? NT->getNumTeams() : nullptr; 3788 Expr *ThreadLimit = (TL) ? TL->getThreadLimit() : nullptr; 3789 3790 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit, 3791 S.getLocStart()); 3792 } 3793 3794 OMPTeamsScope Scope(CGF, S); 3795 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 3796 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 3797 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn, 3798 CapturedVars); 3799 } 3800 3801 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) { 3802 // Emit teams region as a standalone region. 
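  // E.g. for '#pragma omp teams num_teams(4) reduction(+ : x)' the body below
  // privatizes 'x', and emitCommonOMPTeamsDirective outlines the region and
  // (roughly) launches it through the __kmpc_fork_teams runtime entry.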
3803 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) { 3804 OMPPrivateScope PrivateScope(CGF); 3805 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 3806 CGF.EmitOMPPrivateClause(S, PrivateScope); 3807 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 3808 (void)PrivateScope.Privatize(); 3809 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 3810 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams); 3811 }; 3812 emitCommonOMPTeamsDirective(*this, S, OMPD_teams, CodeGen); 3813 emitPostUpdateForReductionClause( 3814 *this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); 3815 } 3816 3817 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action, 3818 const OMPTargetTeamsDirective &S) { 3819 auto *CS = S.getCapturedStmt(OMPD_teams); 3820 Action.Enter(CGF); 3821 auto &&CodeGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) { 3822 // TODO: Add support for clauses. 3823 CGF.EmitStmt(CS->getCapturedStmt()); 3824 }; 3825 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen); 3826 } 3827 3828 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 3829 CodeGenModule &CGM, StringRef ParentName, 3830 const OMPTargetTeamsDirective &S) { 3831 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3832 emitTargetTeamsRegion(CGF, Action, S); 3833 }; 3834 llvm::Function *Fn; 3835 llvm::Constant *Addr; 3836 // Emit target region as a standalone region. 3837 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 3838 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 3839 assert(Fn && Addr && "Target device function emission failed."); 3840 } 3841 3842 void CodeGenFunction::EmitOMPTargetTeamsDirective( 3843 const OMPTargetTeamsDirective &S) { 3844 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 3845 emitTargetTeamsRegion(CGF, Action, S); 3846 }; 3847 emitCommonOMPTargetDirective(*this, S, CodeGen); 3848 } 3849 3850 void CodeGenFunction::EmitOMPCancellationPointDirective( 3851 const OMPCancellationPointDirective &S) { 3852 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(), 3853 S.getCancelRegion()); 3854 } 3855 3856 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 3857 const Expr *IfCond = nullptr; 3858 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 3859 if (C->getNameModifier() == OMPD_unknown || 3860 C->getNameModifier() == OMPD_cancel) { 3861 IfCond = C->getCondition(); 3862 break; 3863 } 3864 } 3865 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond, 3866 S.getCancelRegion()); 3867 } 3868 3869 CodeGenFunction::JumpDest 3870 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 3871 if (Kind == OMPD_parallel || Kind == OMPD_task || 3872 Kind == OMPD_target_parallel) 3873 return ReturnBlock; 3874 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections || 3875 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for || 3876 Kind == OMPD_distribute_parallel_for || 3877 Kind == OMPD_target_parallel_for); 3878 return OMPCancelStack.getExitBlock(); 3879 } 3880 3881 void CodeGenFunction::EmitOMPUseDevicePtrClause( 3882 const OMPClause &NC, OMPPrivateScope &PrivateScope, 3883 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) { 3884 const auto &C = cast<OMPUseDevicePtrClause>(NC); 3885 auto OrigVarIt = C.varlist_begin(); 3886 auto InitIt = C.inits().begin(); 3887 for (auto PvtVarIt : C.private_copies()) { 3888 auto *OrigVD = 
        cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
    auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
    auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());

    // In order to identify the right initializer we need to match the
    // declaration used by the mapping logic. In some cases we may get
    // an OMPCapturedExprDecl that refers to the original declaration.
    const ValueDecl *MatchingVD = OrigVD;
    if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
      // OMPCapturedExprDecls are used to privatize fields of the current
      // structure.
      auto *ME = cast<MemberExpr>(OED->getInit());
      assert(isa<CXXThisExpr>(ME->getBase()) &&
             "Base should be the current struct!");
      MatchingVD = ME->getMemberDecl();
    }

    // If we don't have information about the current list item, move on to
    // the next one.
    auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
    if (InitAddrIt == CaptureDeviceAddrMap.end())
      continue;

    bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
      // Initialize the temporary initialization variable with the address we
      // get from the runtime library. We have to cast the source address
      // because it is always a void *. References are materialized in the
      // privatization scope, so the initialization here disregards the fact
      // that the original variable is a reference.
      QualType AddrQTy =
          getContext().getPointerType(OrigVD->getType().getNonReferenceType());
      llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
      Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
      setAddrOfLocalVar(InitVD, InitAddr);

      // Emit the private declaration; it will be initialized by the
      // declaration we just added to the local declarations map.
      EmitDecl(*PvtVD);

      // The initialization variable has served its purpose in the emission of
      // the previous declaration, so we don't need it anymore.
      LocalDeclMap.erase(InitVD);

      // Return the address of the private variable.
      return GetAddrOfLocalVar(PvtVD);
    });
    assert(IsRegistered && "firstprivate var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    ++OrigVarIt;
    ++InitIt;
  }
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {
  CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);

  // Create a pre/post action to signal the privatization of the device
  // pointer. This action can be replaced by the OpenMP runtime code generation
  // to deactivate privatization.
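  // E.g. for '#pragma omp target data map(to : p[0:n]) use_device_ptr(p)',
  // references to 'p' inside the region must be replaced by the device
  // address recorded in Info.CaptureDeviceAddrMap.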
  bool PrivatizeDevicePointers = false;
  class DevicePointerPrivActionTy : public PrePostActionTy {
    bool &PrivatizeDevicePointers;

  public:
    explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
        : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
    void Enter(CodeGenFunction &CGF) override {
      PrivatizeDevicePointers = true;
    }
  };
  DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);

  auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
      CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
      CGF.EmitStmt(
          cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    };

    // Codegen that selects whether to generate the privatization code or not.
    auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
                          &InnermostCodeGen](CodeGenFunction &CGF,
                                             PrePostActionTy &Action) {
      RegionCodeGenTy RCG(InnermostCodeGen);
      PrivatizeDevicePointers = false;

      // Call the pre-action to change the status of PrivatizeDevicePointers if
      // needed.
      Action.Enter(CGF);

      if (PrivatizeDevicePointers) {
        OMPPrivateScope PrivateScope(CGF);
        // Emit all instances of the use_device_ptr clause.
        for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
          CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
                                        Info.CaptureDeviceAddrMap);
        (void)PrivateScope.Privatize();
        RCG(CGF);
      } else
        RCG(CGF);
    };

    // Forward the provided action to the privatization codegen.
    RegionCodeGenTy PrivRCG(PrivCodeGen);
    PrivRCG.setAction(Action);

    // Although the body of the region is emitted as an inlined directive, we
    // don't use an inlined scope, because changes to the references inside the
    // region are expected to be visible outside, so we do not privatize them.
    OMPLexicalScope Scope(CGF, S);
    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
                                                    PrivRCG);
  };

  RegionCodeGenTy RCG(CodeGen);

  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty()) {
    RCG(*this);
    return;
  }

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
  const Expr *Device = nullptr;
  if (auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = C->getDevice();

  // Set the action to signal privatization of device pointers.
  RCG.setAction(PrivAction);

  // Emit region code.
  CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
                                             Info);
}

void CodeGenFunction::EmitOMPTargetEnterDataDirective(
    const OMPTargetEnterDataDirective &S) {
  // If we don't have target devices, don't bother emitting the data mapping
  // code.
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    return;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
  if (auto *C = S.getSingleClause<OMPIfClause>())
    IfCond = C->getCondition();

  // Check if we have any device clause associated with the directive.
4046 const Expr *Device = nullptr; 4047 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 4048 Device = C->getDevice(); 4049 4050 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 4051 } 4052 4053 void CodeGenFunction::EmitOMPTargetExitDataDirective( 4054 const OMPTargetExitDataDirective &S) { 4055 // If we don't have target devices, don't bother emitting the data mapping 4056 // code. 4057 if (CGM.getLangOpts().OMPTargetTriples.empty()) 4058 return; 4059 4060 // Check if we have any if clause associated with the directive. 4061 const Expr *IfCond = nullptr; 4062 if (auto *C = S.getSingleClause<OMPIfClause>()) 4063 IfCond = C->getCondition(); 4064 4065 // Check if we have any device clause associated with the directive. 4066 const Expr *Device = nullptr; 4067 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 4068 Device = C->getDevice(); 4069 4070 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 4071 } 4072 4073 static void emitTargetParallelRegion(CodeGenFunction &CGF, 4074 const OMPTargetParallelDirective &S, 4075 PrePostActionTy &Action) { 4076 // Get the captured statement associated with the 'parallel' region. 4077 auto *CS = S.getCapturedStmt(OMPD_parallel); 4078 Action.Enter(CGF); 4079 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &) { 4080 CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 4081 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope); 4082 CGF.EmitOMPPrivateClause(S, PrivateScope); 4083 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 4084 (void)PrivateScope.Privatize(); 4085 // TODO: Add support for clauses. 4086 CGF.EmitStmt(CS->getCapturedStmt()); 4087 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel); 4088 }; 4089 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen, 4090 emitEmptyBoundParameters); 4091 emitPostUpdateForReductionClause( 4092 CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; }); 4093 } 4094 4095 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 4096 CodeGenModule &CGM, StringRef ParentName, 4097 const OMPTargetParallelDirective &S) { 4098 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4099 emitTargetParallelRegion(CGF, S, Action); 4100 }; 4101 llvm::Function *Fn; 4102 llvm::Constant *Addr; 4103 // Emit target region as a standalone region. 4104 CGM.getOpenMPRuntime().emitTargetOutlinedFunction( 4105 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen); 4106 assert(Fn && Addr && "Target device function emission failed."); 4107 } 4108 4109 void CodeGenFunction::EmitOMPTargetParallelDirective( 4110 const OMPTargetParallelDirective &S) { 4111 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) { 4112 emitTargetParallelRegion(CGF, S, Action); 4113 }; 4114 emitCommonOMPTargetDirective(*this, S, CodeGen); 4115 } 4116 4117 void CodeGenFunction::EmitOMPTargetParallelForDirective( 4118 const OMPTargetParallelForDirective &S) { 4119 // TODO: codegen for target parallel for. 4120 } 4121 4122 /// Emit a helper variable and return corresponding lvalue. 
4123 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper, 4124 const ImplicitParamDecl *PVD, 4125 CodeGenFunction::OMPPrivateScope &Privates) { 4126 auto *VDecl = cast<VarDecl>(Helper->getDecl()); 4127 Privates.addPrivate( 4128 VDecl, [&CGF, PVD]() -> Address { return CGF.GetAddrOfLocalVar(PVD); }); 4129 } 4130 4131 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) { 4132 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind())); 4133 // Emit outlined function for task construct. 4134 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 4135 auto CapturedStruct = GenerateCapturedStmtArgument(*CS); 4136 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 4137 const Expr *IfCond = nullptr; 4138 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 4139 if (C->getNameModifier() == OMPD_unknown || 4140 C->getNameModifier() == OMPD_taskloop) { 4141 IfCond = C->getCondition(); 4142 break; 4143 } 4144 } 4145 4146 OMPTaskDataTy Data; 4147 // Check if taskloop must be emitted without taskgroup. 4148 Data.Nogroup = S.getSingleClause<OMPNogroupClause>(); 4149 // TODO: Check if we should emit tied or untied task. 4150 Data.Tied = true; 4151 // Set scheduling for taskloop 4152 if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) { 4153 // grainsize clause 4154 Data.Schedule.setInt(/*IntVal=*/false); 4155 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize())); 4156 } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) { 4157 // num_tasks clause 4158 Data.Schedule.setInt(/*IntVal=*/true); 4159 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks())); 4160 } 4161 4162 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) { 4163 // if (PreCond) { 4164 // for (IV in 0..LastIteration) BODY; 4165 // <Final counter/linear vars updates>; 4166 // } 4167 // 4168 4169 // Emit: if (PreCond) - begin. 4170 // If the condition constant folds and can be elided, avoid emitting the 4171 // whole loop. 4172 bool CondConstant; 4173 llvm::BasicBlock *ContBlock = nullptr; 4174 OMPLoopScope PreInitScope(CGF, S); 4175 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 4176 if (!CondConstant) 4177 return; 4178 } else { 4179 auto *ThenBlock = CGF.createBasicBlock("taskloop.if.then"); 4180 ContBlock = CGF.createBasicBlock("taskloop.if.end"); 4181 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 4182 CGF.getProfileCount(&S)); 4183 CGF.EmitBlock(ThenBlock); 4184 CGF.incrementProfileCounter(&S); 4185 } 4186 4187 if (isOpenMPSimdDirective(S.getDirectiveKind())) 4188 CGF.EmitOMPSimdInit(S); 4189 4190 OMPPrivateScope LoopScope(CGF); 4191 // Emit helper vars inits. 4192 enum { LowerBound = 5, UpperBound, Stride, LastIter }; 4193 auto *I = CS->getCapturedDecl()->param_begin(); 4194 auto *LBP = std::next(I, LowerBound); 4195 auto *UBP = std::next(I, UpperBound); 4196 auto *STP = std::next(I, Stride); 4197 auto *LIP = std::next(I, LastIter); 4198 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP, 4199 LoopScope); 4200 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP, 4201 LoopScope); 4202 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope); 4203 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP, 4204 LoopScope); 4205 CGF.EmitOMPPrivateLoopCounters(S, LoopScope); 4206 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 4207 (void)LoopScope.Privatize(); 4208 // Emit the loop iteration variable. 
4209 const Expr *IVExpr = S.getIterationVariable(); 4210 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 4211 CGF.EmitVarDecl(*IVDecl); 4212 CGF.EmitIgnoredExpr(S.getInit()); 4213 4214 // Emit the iterations count variable. 4215 // If it is not a variable, Sema decided to calculate iterations count on 4216 // each iteration (e.g., it is foldable into a constant). 4217 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 4218 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 4219 // Emit calculation of the iterations count. 4220 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 4221 } 4222 4223 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), 4224 S.getInc(), 4225 [&S](CodeGenFunction &CGF) { 4226 CGF.EmitOMPLoopBody(S, JumpDest()); 4227 CGF.EmitStopPoint(&S); 4228 }, 4229 [](CodeGenFunction &) {}); 4230 // Emit: if (PreCond) - end. 4231 if (ContBlock) { 4232 CGF.EmitBranch(ContBlock); 4233 CGF.EmitBlock(ContBlock, true); 4234 } 4235 // Emit final copy of the lastprivate variables if IsLastIter != 0. 4236 if (HasLastprivateClause) { 4237 CGF.EmitOMPLastprivateClauseFinal( 4238 S, isOpenMPSimdDirective(S.getDirectiveKind()), 4239 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar( 4240 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false, 4241 (*LIP)->getType(), S.getLocStart()))); 4242 } 4243 }; 4244 auto &&TaskGen = [&S, SharedsTy, CapturedStruct, 4245 IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn, 4246 const OMPTaskDataTy &Data) { 4247 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &) { 4248 OMPLoopScope PreInitScope(CGF, S); 4249 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S, 4250 OutlinedFn, SharedsTy, 4251 CapturedStruct, IfCond, Data); 4252 }; 4253 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop, 4254 CodeGen); 4255 }; 4256 if (Data.Nogroup) 4257 EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data); 4258 else { 4259 CGM.getOpenMPRuntime().emitTaskgroupRegion( 4260 *this, 4261 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF, 4262 PrePostActionTy &Action) { 4263 Action.Enter(CGF); 4264 CGF.EmitOMPTaskBasedDirective(S, BodyGen, TaskGen, Data); 4265 }, 4266 S.getLocStart()); 4267 } 4268 } 4269 4270 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) { 4271 EmitOMPTaskLoopBasedDirective(S); 4272 } 4273 4274 void CodeGenFunction::EmitOMPTaskLoopSimdDirective( 4275 const OMPTaskLoopSimdDirective &S) { 4276 EmitOMPTaskLoopBasedDirective(S); 4277 } 4278 4279 // Generate the instructions for '#pragma omp target update' directive. 4280 void CodeGenFunction::EmitOMPTargetUpdateDirective( 4281 const OMPTargetUpdateDirective &S) { 4282 // If we don't have target devices, don't bother emitting the data mapping 4283 // code. 4284 if (CGM.getLangOpts().OMPTargetTriples.empty()) 4285 return; 4286 4287 // Check if we have any if clause associated with the directive. 4288 const Expr *IfCond = nullptr; 4289 if (auto *C = S.getSingleClause<OMPIfClause>()) 4290 IfCond = C->getCondition(); 4291 4292 // Check if we have any device clause associated with the directive. 4293 const Expr *Device = nullptr; 4294 if (auto *C = S.getSingleClause<OMPDeviceClause>()) 4295 Device = C->getDevice(); 4296 4297 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device); 4298 } 4299