//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      auto *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis())
      CapturedVars.push_back(CXXThisValue);
    else if (CurCap->capturesVariableByCopy())
      CapturedVars.push_back(
          EmitLoadOfLValue(EmitLValue(*I), SourceLocation()).getScalarVal());
    else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
    }
  }
}
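
// Illustration (a sketch, not emitted verbatim): for a region capturing
//   float x;     // firstprivate -> captured by copy
//   int *p;      // firstprivate -> captured by copy (pointer)
//   double a[n]; // VLA -> its size expression is captured as well
//   S &s;        // captured by reference
// the loop above pushes, in capture-field order: the precomputed size of 'a',
// the loaded value of 'x', the loaded value of 'p', and the address of 's'.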

static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
                                    StringRef Name, LValue AddrLV,
                                    bool isReferenceType = false) {
  ASTContext &Ctx = CGF.getContext();

  auto *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), SourceLocation());
  auto TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
          .getAddress();

  // If we are dealing with references we need to return the address of the
  // reference instead of the address of the value.
  if (isReferenceType) {
    QualType RefType = Ctx.getLValueReferenceType(DstType);
    auto *RefVal = TmpAddr.getPointer();
    TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
    auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
    CGF.EmitScalarInit(RefVal, TmpLVal);
  }

  return TmpAddr;
}
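
// For example (roughly; the exact IR depends on the target), recovering a
// 'float' that traveled through a uintptr argument '%x' looks like
//   %0 = inttoptr i64 %x to float*
// and for a reference the recovered pointer is additionally spilled to a
// '<Name>.ref' temporary whose address is returned instead.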

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = S.captures().begin();
  for (auto *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
    if ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
        I->capturesVariableArrayType())
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis())
      II = &getContext().Idents.get("this");
    else {
      assert(I->capturesVariableArrayType());
      II = &getContext().Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getContext().getVariableArrayDecayedType(ArgType);
    Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
                                             FD->getLocation(), II, ArgType));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
                                                    /*IsVariadic=*/false);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F = llvm::Function::Create(
      FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
      CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getLocStart());
  unsigned Cnt = CD->getContextParamPosition();
  I = S.captures().begin();
  for (auto *FD : RD->fields()) {
    // If we are capturing a pointer by copy we don't need to do anything;
    // just use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      setAddrOfLocalVar(I->getCapturedVar(), GetAddrOfLocalVar(Args[Cnt]));
      ++Cnt, ++I;
      continue;
    }

    LValue ArgLVal =
        MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
                       AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      LValue CastedArgLVal =
          MakeAddrLValue(castValueFromUintptr(*this, FD->getType(),
                                              Args[Cnt]->getName(), ArgLVal),
                         FD->getType(), AlignmentSource::Decl);
      auto *ExprArg =
          EmitLoadOfLValue(CastedArgLVal, SourceLocation()).getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    } else if (I->capturesVariable()) {
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress();
      if (!VarTy->isReferenceType()) {
        ArgAddr = EmitLoadOfReference(
            ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
      }
      setAddrOfLocalVar(
          Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      setAddrOfLocalVar(I->getCapturedVar(),
                        castValueFromUintptr(*this, FD->getType(),
                                             Args[Cnt]->getName(), ArgLVal,
                                             VarTy->isReferenceType()));
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue =
          EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
    }
    ++Cnt, ++I;
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
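
// Illustration (assumed shape, not a guaranteed ABI): for
//   int x; int *p;
//   #pragma omp parallel firstprivate(x, p)
// the helper built above looks roughly like
//   define internal void @.omp_outlined.(i32* %.global_tid.,
//                                        i32* %.bound_tid., i64 %x, i32* %p)
// where the non-pointer copy 'x' travels as a uintptr-sized integer and is
// cast back in the prologue, while the pointer 'p' is used directly.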

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  auto SrcBegin = SrcAddr.getPointer();
  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
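
// Schematically, the structure emitted above is:
//   entry:              br i1 %omp.arraycpy.isempty, %done, %body
//   omp.arraycpy.body:  src/dest element PHIs; CopyGen(dest, src);
//                       advance both pointers by one element;
//                       br i1 %omp.arraycpy.done, %done, %body
//   omp.arraycpy.done: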

/// \brief Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of array.
/// \param Init Initial expression of array.
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
                                 QualType Type, const Expr *Init) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type of the array.
  auto ArrayTy = Type->getAsArrayTypeUnsafe();
  auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());

  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  auto IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                         /*IsInitializer=*/false);
  }

  // Shift the address forward by one element.
  auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  auto Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() -> Address {
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> Address { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}
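
// For example (sketch): copying 'double d[8]' with a plain '=' assignment op
// takes the memcpy path above, while 'std::string s[8]' takes the
// element-by-element path, remapping DestVD/SrcVD to each pair of elements
// and re-emitting 'Copy' (the 'operator=' call) once per element.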

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        Address OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in the current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](Address DestElement,
                                       Address SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    setAddrOfLocalVar(VDInit, SrcElement);
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            // Remap temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            setAddrOfLocalVar(VDInit, OriginalAddr);
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}
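
// Illustrative effect (sketch): for
//   int a[8]; S s;
//   #pragma omp parallel firstprivate(a, s)
// 'a' gets a private alloca filled with an aggregate copy (or the element-wise
// loop above if the element type needs a non-trivial constructor), and 's'
// gets a private VarDecl emitted with its copy-initializer; both are
// registered in PrivateScope so the region body sees the private copies.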

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> Address {
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {

        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
                          VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread; if
          // it is, there is no need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
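
// The pattern emitted above, schematically (see also the pseudo-code comment
// at the top of this function):
//   if ((uintptr_t)&master_var != (uintptr_t)&threadprivate_var) {
//     copyin.not.master:
//       <assignment for every copyin variable>
//   }
//   copyin.not.master.end:
// and the caller is expected to emit the synchronizing barrier afterwards.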

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in
        // codegen for the 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> Address {
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
      LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
                                      LoopDirective->getUpperBoundVariable())
                                      ->getDecl())
                        ->getAnyInitializer();
      IVExpr = LoopDirective->getIterationVariable();
      IncExpr = LoopDirective->getInc();
      auto IUpdate = LoopDirective->updates().begin();
      for (auto *E : LoopDirective->counters()) {
        auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
        LoopCountersAndUpdates[D] = *IUpdate;
        ++IUpdate;
      }
    }
  }
  {
    llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
    bool FirstLCV = true;
    for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
      auto IRef = C->varlist_begin();
      auto ISrcRef = C->source_exprs().begin();
      auto IDestRef = C->destination_exprs().begin();
      for (auto *AssignOp : C->assignment_ops()) {
        auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
        QualType Type = PrivateVD->getType();
        auto *CanonicalVD = PrivateVD->getCanonicalDecl();
        if (AlreadyEmittedVars.insert(CanonicalVD).second) {
          // If the lastprivate variable is a loop control variable of a
          // loop-based directive, update its value before copying it back to
          // the original variable.
          if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
            if (FirstLCV && LastIterVal) {
              EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                               IVExpr->getType().getQualifiers(),
                               /*IsInitializer=*/false);
              EmitIgnoredExpr(IncExpr);
              FirstLCV = false;
            }
            EmitIgnoredExpr(UpExpr);
          }
          auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
          auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
          // Get the address of the original variable.
          Address OriginalAddr = GetAddrOfLocalVar(DestVD);
          // Get the address of the private variable.
          Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
          if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
            PrivateAddr =
                Address(Builder.CreateLoad(PrivateAddr),
                        getNaturalTypeAlignment(RefTy->getPointeeType()));
          EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
        }
        ++IRef;
        ++ISrcRef;
        ++IDestRef;
      }
    }
  }
  if (IsLastIterCond) {
    EmitBlock(DoneBB, /*IsFinished=*/true);
  }
}
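
// Example (sketch): for '#pragma omp for lastprivate(i)' where 'i' is also a
// loop counter, the code above first sets the iteration variable to its final
// value and applies the counter update, so the private 'i' holds the value of
// the sequentially last iteration, and only then copies it back to the
// original 'i' under the IsLastIterCond guard.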

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    auto IPriv = C->privates().begin();
    for (auto IRef : C->varlists()) {
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
      if (auto *OASE = dyn_cast<OMPArraySectionExpr>(IRef)) {
        auto *Base = OASE->getBase()->IgnoreParenImpCasts();
        while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
          Base = TempOASE->getBase()->IgnoreParenImpCasts();
        while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
          Base = TempASE->getBase()->IgnoreParenImpCasts();
        auto *DE = cast<DeclRefExpr>(Base);
        auto *OrigVD = cast<VarDecl>(DE->getDecl());
        auto OASELValueLB = EmitOMPArraySectionExpr(OASE);
        auto OASELValueUB =
            EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
        auto OriginalBaseLValue = EmitLValue(DE);
        auto BaseLValue = OriginalBaseLValue;
        auto *Zero = Builder.getInt64(/*C=*/0);
        llvm::SmallVector<llvm::Value *, 4> Indexes;
        Indexes.push_back(Zero);
        auto *ItemTy =
            OASELValueLB.getPointer()->getType()->getPointerElementType();
        auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
        while (Ty != ItemTy) {
          Indexes.push_back(Zero);
          Ty = Ty->getPointerElementType();
        }
        BaseLValue = MakeAddrLValue(
            Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
                    OASELValueLB.getAlignment()),
            OASELValueLB.getType(), OASELValueLB.getAlignmentSource());
        // Store the address of the original variable associated with the LHS
        // implicit variable.
        PrivateScope.addPrivate(LHSVD, [this, OASELValueLB]() -> Address {
          return OASELValueLB.getAddress();
        });
        // Emit reduction copy.
        bool IsRegistered = PrivateScope.addPrivate(
            OrigVD, [this, PrivateVD, BaseLValue, OASELValueLB, OASELValueUB,
                     OriginalBaseLValue]() -> Address {
              // Emit VarDecl with copy init for arrays.
              // Get the address of the original variable captured in the
              // current captured region.
              auto *Size = Builder.CreatePtrDiff(OASELValueUB.getPointer(),
                                                 OASELValueLB.getPointer());
              Size = Builder.CreateNUWAdd(
                  Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
              CodeGenFunction::OpaqueValueMapping OpaqueMap(
                  *this, cast<OpaqueValueExpr>(
                             getContext()
                                 .getAsVariableArrayType(PrivateVD->getType())
                                 ->getSizeExpr()),
                  RValue::get(Size));
              EmitVariablyModifiedType(PrivateVD->getType());
              auto Emission = EmitAutoVarAlloca(*PrivateVD);
              auto Addr = Emission.getAllocatedAddress();
              auto *Init = PrivateVD->getInit();
              EmitOMPAggregateInit(*this, Addr, PrivateVD->getType(), Init);
              EmitAutoVarCleanups(Emission);
              // Emit private VarDecl with reduction init.
              auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
                                                   OASELValueLB.getPointer());
              auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
              Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
                  Ptr, OriginalBaseLValue.getPointer()->getType());
              return Address(Ptr, OriginalBaseLValue.getAlignment());
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
        PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
          return GetAddrOfLocalVar(PrivateVD);
        });
      } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(IRef)) {
        auto *Base = ASE->getBase()->IgnoreParenImpCasts();
        while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
          Base = TempASE->getBase()->IgnoreParenImpCasts();
        auto *DE = cast<DeclRefExpr>(Base);
        auto *OrigVD = cast<VarDecl>(DE->getDecl());
        auto ASELValue = EmitLValue(ASE);
        auto OriginalBaseLValue = EmitLValue(DE);
        auto BaseLValue = OriginalBaseLValue;
        auto *Zero = Builder.getInt64(/*C=*/0);
        llvm::SmallVector<llvm::Value *, 4> Indexes;
        Indexes.push_back(Zero);
        auto *ItemTy =
            ASELValue.getPointer()->getType()->getPointerElementType();
        auto *Ty = BaseLValue.getPointer()->getType()->getPointerElementType();
        while (Ty != ItemTy) {
          Indexes.push_back(Zero);
          Ty = Ty->getPointerElementType();
        }
        BaseLValue = MakeAddrLValue(
            Address(Builder.CreateInBoundsGEP(BaseLValue.getPointer(), Indexes),
                    ASELValue.getAlignment()),
            ASELValue.getType(), ASELValue.getAlignmentSource());
        // Store the address of the original variable associated with the LHS
        // implicit variable.
        PrivateScope.addPrivate(LHSVD, [this, ASELValue]() -> Address {
          return ASELValue.getAddress();
        });
        // Emit reduction copy.
        bool IsRegistered = PrivateScope.addPrivate(
            OrigVD, [this, PrivateVD, BaseLValue, ASELValue,
                     OriginalBaseLValue]() -> Address {
              // Emit private VarDecl with reduction init.
              EmitDecl(*PrivateVD);
              auto Addr = GetAddrOfLocalVar(PrivateVD);
              auto *Offset = Builder.CreatePtrDiff(BaseLValue.getPointer(),
                                                   ASELValue.getPointer());
              auto *Ptr = Builder.CreateGEP(Addr.getPointer(), Offset);
              Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(
                  Ptr, OriginalBaseLValue.getPointer()->getType());
              return Address(Ptr, OriginalBaseLValue.getAlignment());
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
        PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
          return GetAddrOfLocalVar(PrivateVD);
        });
      } else {
        auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
        // Store the address of the original variable associated with the LHS
        // implicit variable.
        PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> Address {
          DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                          CapturedStmtInfo->lookup(OrigVD) != nullptr,
                          IRef->getType(), VK_LValue, IRef->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Emit reduction copy.
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> Address {
              // Emit private VarDecl with reduction init.
              EmitDecl(*PrivateVD);
              return GetAddrOfLocalVar(PrivateVD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
        PrivateScope.addPrivate(RHSVD, [this, PrivateVD]() -> Address {
          return GetAddrOfLocalVar(PrivateVD);
        });
      }
      ++ILHS, ++IRHS, ++IPriv;
    }
  }
}
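
// Example (sketch): for
//   #pragma omp parallel reduction(+ : a[1:n])
// the array-section path above computes Size = (UB - LB) + 1, allocates a
// VLA-sized private copy initialized with the reduction initializer, and
// remaps the original 'a' to (private base - section offset), so existing
// subscript expressions keep working inside the region.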

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause<OMPNowaitClause>() ||
            isOpenMPParallelDirective(D.getDirectiveKind()) ||
            D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}

static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           OpenMPDirectiveKind InnermostKind,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedVars, IfCond);
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // on initialization of firstprivate variables, or on propagation of the
      // master thread's values of threadprivate variables to the local
      // instances of those variables in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}
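
// Overall shape (sketch; the exact calls are emitted by
// CGOpenMPRuntime::emitParallelCall): the directive above becomes roughly
//   <evaluate num_threads/proc_bind clauses, if present>
//   call void @__kmpc_fork_call(@loc, <num captured vars>,
//                               @.omp_outlined., <captured vars...>)
// with a serialized (__kmpc_serialized_parallel) fallback when the 'if'
// clause condition evaluates to false.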

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counter values on the current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  // TODO: Update lastprivates if the SeparateIter flag is true.
  // This will be implemented in a follow-up OMPLastprivateClause patch, but
  // the result should still be correct without it, as we do not make these
  // variables private yet.
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}
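
// Block structure produced above, schematically:
//   omp.inner.for.cond: br i1 <LoopCond>, %body, %end (or %cleanup)
//   omp.inner.for.body: <BodyGen>; br %inc
//   omp.inner.for.inc:  <IncExpr>; <PostIncGen>; br %cond
//   omp.inner.for.end:
// with an omp.inner.for.cond.cleanup staging block only when the enclosing
// scope requires cleanups on exit.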

void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return;
  // Emit inits for the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto Init : C->inits()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      auto *OrigVD = cast<VarDecl>(
          cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      VD->getInit()->getType(), VK_LValue,
                      VD->getInit()->getExprLoc());
      AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
      EmitExprAsInit(&DRE, VD,
                     MakeAddrLValue(Emission.getAllocatedAddress(),
                                    VD->getType()),
                     /*capturedByInit=*/false);
      EmitAutoVarCleanups(Emission);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}

static void emitLinearClauseFinal(CodeGenFunction &CGF,
                                  const OMPLoopDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (auto F : C->finals()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = CGF.EmitLValue(&DRE).getAddress();
      CodeGenFunction::OMPPrivateScope VarScope(CGF);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> Address { return OrigAddr; });
      (void)VarScope.Privatize();
      CGF.EmitIgnoredExpr(F);
      ++IC;
    }
  }
}

static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (auto E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}
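
// Example (sketch): '#pragma omp simd aligned(p : 32)' reaches the code above
// and emits an alignment assumption for 'p', roughly:
//   %ptrint    = ptrtoint float* %p to i64
//   %maskedptr = and i64 %ptrint, 31
//   %maskcond  = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)
// so later passes may rely on 32-byte alignment.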

static void emitPrivateLoopCounters(CodeGenFunction &CGF,
                                    CodeGenFunction::OMPPrivateScope &LoopScope,
                                    ArrayRef<Expr *> Counters,
                                    ArrayRef<Expr *> PrivateCounters) {
  if (!CGF.HaveInsertPoint())
    return;
  auto I = PrivateCounters.begin();
  for (auto *E : Counters) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    Address Addr = Address::invalid();
    (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD);
      CGF.EmitAutoVarCleanups(VarEmission);
      Addr = VarEmission.getAllocatedAddress();
      return Addr;
    });
    (void)LoopScope.addPrivate(VD, [&]() -> Address { return Addr; });
    ++I;
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    emitPrivateLoopCounters(CGF, PreCondScope, S.counters(),
                            S.private_counters());
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (auto I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                      CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (auto *E : C->varlists()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
        // Emit private VarDecl with copy init.
        CGF.EmitVarDecl(*PrivateVD);
        return CGF.GetAddrOfLocalVar(PrivateVD);
      });
      assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable(true);
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
}

void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return;
  auto IC = D.counters().begin();
  for (auto F : D.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> Address { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  emitLinearClauseFinal(*this, D);
}
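
// Example (sketch): '#pragma omp simd simdlen(8)' makes the simdlen/safelen
// handling above attach loop metadata roughly equivalent to
//   !{!"llvm.loop.vectorize.width", i32 8}
//   !{!"llvm.loop.vectorize.enable", i1 true}
// while a finite 'safelen' additionally keeps memory accesses from being
// marked parallel, since dependences across 'safelen' iterations are allowed.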

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iteration count
    // on each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPSimdInit(S);

    emitAlignedClause(CGF, S);
    CGF.EmitOMPLinearClauseInit(S);
    bool HasLastprivateClause;
    {
      OMPPrivateScope LoopScope(CGF);
      emitPrivateLoopCounters(CGF, LoopScope, S.counters(),
                              S.private_counters());
      emitPrivateLinearVars(CGF, S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S, JumpDest());
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      // Emit final copy of the lastprivate variables at the end of loops.
      if (HasLastprivateClause) {
        CGF.EmitOMPLastprivateClauseFinal(S);
      }
      CGF.EmitOMPReductionClauseFinal(S);
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(
    OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //   __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the
  // threads in the team in a round-robin fashion in the order of the thread
  // number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
    RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,
                           IVSize, IVSigned, Ordered, UBVal, Chunk);
  } else {
    RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
                         IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);
  }

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in the case of static schedule, we have already
  // calculated the new LB for the loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind()))
    LoopStack.setParallel(!IsMonotonic);
  else
    EmitOMPSimdInit(S, IsMonotonic);

  SourceLocation Loc = S.getLocStart();
  EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                   [&S, LoopExit](CodeGenFunction &CGF) {
                     CGF.EmitOMPLoopBody(S, LoopExit);
                     CGF.EmitStopPoint(&S);
                   },
                   [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
                     if (Ordered) {
                       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
                           CGF, Loc, IVSize, IVSigned);
                     }
                   });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}

/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

static std::pair<llvm::Value * /*Chunk*/, ScheduleKindModifiersTy>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  OpenMPScheduleClauseModifier M1 = OMPC_SCHEDULE_MODIFIER_unknown;
  OpenMPScheduleClauseModifier M2 = OMPC_SCHEDULE_MODIFIER_unknown;
  llvm::Value *Chunk = nullptr;
  if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
    ScheduleKind = C->getScheduleKind();
    M1 = C->getFirstScheduleModifier();
    M2 = C->getSecondScheduleModifier();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
          const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
          CGF.EmitVarDecl(*ImpVar);
          CGF.EmitStoreThroughLValue(
              CGF.EmitAnyExpr(Ch),
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
                                 ImpVar->getType()));
        } else {
          Ch = ImpRef;
        }
      }
      if (!C->getHelperChunkSize() || !OuterRegion) {
        Chunk = CGF.EmitScalarExpr(Ch);
        Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
                                         S.getIterationVariable()->getType(),
                                         S.getLocStart());
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKindModifiersTy(ScheduleKind, M1, M2));
}
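
// Example (sketch): for '#pragma omp for schedule(dynamic, n * 2)' the chunk
// expression 'n * 2' is either evaluated here directly (inner region), or,
// when a helper chunk variable exists and OuterRegion is true, stored into
// that helper so the value computed outside the region can be reused inside.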
1497 bool CondConstant; 1498 llvm::BasicBlock *ContBlock = nullptr; 1499 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 1500 if (!CondConstant) 1501 return false; 1502 } else { 1503 auto *ThenBlock = createBasicBlock("omp.precond.then"); 1504 ContBlock = createBasicBlock("omp.precond.end"); 1505 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 1506 getProfileCount(&S)); 1507 EmitBlock(ThenBlock); 1508 incrementProfileCounter(&S); 1509 } 1510 1511 emitAlignedClause(*this, S); 1512 EmitOMPLinearClauseInit(S); 1513 // Emit 'then' code. 1514 { 1515 // Emit helper vars inits. 1516 LValue LB = 1517 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable())); 1518 LValue UB = 1519 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable())); 1520 LValue ST = 1521 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 1522 LValue IL = 1523 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 1524 1525 OMPPrivateScope LoopScope(*this); 1526 if (EmitOMPFirstprivateClause(S, LoopScope)) { 1527 // Emit implicit barrier to synchronize threads and avoid data races on 1528 // initialization of firstprivate variables. 1529 CGM.getOpenMPRuntime().emitBarrierCall( 1530 *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false, 1531 /*ForceSimpleCall=*/true); 1532 } 1533 EmitOMPPrivateClause(S, LoopScope); 1534 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 1535 EmitOMPReductionClauseInit(S, LoopScope); 1536 emitPrivateLoopCounters(*this, LoopScope, S.counters(), 1537 S.private_counters()); 1538 emitPrivateLinearVars(*this, S, LoopScope); 1539 (void)LoopScope.Privatize(); 1540 1541 // Detect the loop schedule kind and chunk. 1542 llvm::Value *Chunk; 1543 OpenMPScheduleClauseKind ScheduleKind; 1544 auto ScheduleInfo = 1545 emitScheduleClause(*this, S, /*OuterRegion=*/false); 1546 Chunk = ScheduleInfo.first; 1547 ScheduleKind = ScheduleInfo.second.Kind; 1548 const OpenMPScheduleClauseModifier M1 = ScheduleInfo.second.M1; 1549 const OpenMPScheduleClauseModifier M2 = ScheduleInfo.second.M2; 1550 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 1551 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 1552 const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr; 1553 // OpenMP 4.5, 2.7.1 Loop Construct, Description. 1554 // If the static schedule kind is specified or if the ordered clause is 1555 // specified, and if no monotonic modifier is specified, the effect will 1556 // be as if the monotonic modifier was specified. 1557 if (RT.isStaticNonchunked(ScheduleKind, 1558 /* Chunked */ Chunk != nullptr) && 1559 !Ordered) { 1560 if (isOpenMPSimdDirective(S.getDirectiveKind())) 1561 EmitOMPSimdInit(S, /*IsMonotonic=*/true); 1562 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 1563 // When no chunk_size is specified, the iteration space is divided into 1564 // chunks that are approximately equal in size, and at most one chunk is 1565 // distributed to each thread. Note that the size of the chunks is 1566 // unspecified in this case. 
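// E.g. a static, non-chunked schedule of 100 iterations over 4 threads
// hands each thread one contiguous block of about 25 iterations, so a
// single static-init/static-fini pair suffices and no dispatch loop is
// emitted.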
1567 RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, 1568 IVSize, IVSigned, Ordered, 1569 IL.getAddress(), LB.getAddress(), 1570 UB.getAddress(), ST.getAddress()); 1571 auto LoopExit = 1572 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 1573 // UB = min(UB, GlobalUB); 1574 EmitIgnoredExpr(S.getEnsureUpperBound()); 1575 // IV = LB; 1576 EmitIgnoredExpr(S.getInit()); 1577 // while (idx <= UB) { BODY; ++idx; } 1578 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), 1579 S.getInc(), 1580 [&S, LoopExit](CodeGenFunction &CGF) { 1581 CGF.EmitOMPLoopBody(S, LoopExit); 1582 CGF.EmitStopPoint(&S); 1583 }, 1584 [](CodeGenFunction &) {}); 1585 EmitBlock(LoopExit.getBlock()); 1586 // Tell the runtime we are done. 1587 RT.emitForStaticFinish(*this, S.getLocStart()); 1588 } else { 1589 const bool IsMonotonic = Ordered || 1590 ScheduleKind == OMPC_SCHEDULE_static || 1591 ScheduleKind == OMPC_SCHEDULE_unknown || 1592 M1 == OMPC_SCHEDULE_MODIFIER_monotonic || 1593 M2 == OMPC_SCHEDULE_MODIFIER_monotonic; 1594 // Emit the outer loop, which requests its work chunk [LB..UB] from 1595 // runtime and runs the inner loop to process it. 1596 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered, 1597 LB.getAddress(), UB.getAddress(), ST.getAddress(), 1598 IL.getAddress(), Chunk); 1599 } 1600 EmitOMPReductionClauseFinal(S); 1601 // Emit final copy of the lastprivate variables if IsLastIter != 0. 1602 if (HasLastprivateClause) 1603 EmitOMPLastprivateClauseFinal( 1604 S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart()))); 1605 } 1606 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 1607 EmitOMPSimdFinal(S); 1608 } 1609 // We're now done with the loop, so jump to the continuation block. 1610 if (ContBlock) { 1611 EmitBranch(ContBlock); 1612 EmitBlock(ContBlock, true); 1613 } 1614 } 1615 return HasLastprivateClause; 1616 } 1617 1618 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 1619 LexicalScope Scope(*this, S.getSourceRange()); 1620 bool HasLastprivates = false; 1621 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) { 1622 HasLastprivates = CGF.EmitOMPWorksharingLoop(S); 1623 }; 1624 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen, 1625 S.hasCancel()); 1626 1627 // Emit an implicit barrier at the end. 1628 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) { 1629 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); 1630 } 1631 } 1632 1633 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 1634 LexicalScope Scope(*this, S.getSourceRange()); 1635 bool HasLastprivates = false; 1636 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) { 1637 HasLastprivates = CGF.EmitOMPWorksharingLoop(S); 1638 }; 1639 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 1640 1641 // Emit an implicit barrier at the end. 
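// The barrier may only be dropped when a 'nowait' clause is present and
// there are no lastprivates: lastprivate copy-out must complete before
// other threads may read the shared variables.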
1642 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) { 1643 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); 1644 } 1645 } 1646 1647 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 1648 const Twine &Name, 1649 llvm::Value *Init = nullptr) { 1650 auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 1651 if (Init) 1652 CGF.EmitScalarInit(Init, LVal); 1653 return LVal; 1654 } 1655 1656 OpenMPDirectiveKind 1657 CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 1658 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt(); 1659 auto *CS = dyn_cast<CompoundStmt>(Stmt); 1660 if (CS && CS->size() > 1) { 1661 bool HasLastprivates = false; 1662 auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) { 1663 auto &C = CGF.CGM.getContext(); 1664 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 1665 // Emit helper vars inits. 1666 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 1667 CGF.Builder.getInt32(0)); 1668 auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1); 1669 LValue UB = 1670 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 1671 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 1672 CGF.Builder.getInt32(1)); 1673 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 1674 CGF.Builder.getInt32(0)); 1675 // Loop counter. 1676 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 1677 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); 1678 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 1679 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); 1680 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 1681 // Generate condition for loop. 1682 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, 1683 OK_Ordinary, S.getLocStart(), 1684 /*fpContractable=*/false); 1685 // Increment for loop counter. 1686 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, 1687 OK_Ordinary, S.getLocStart()); 1688 auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) { 1689 // Iterate through all sections and emit a switch construct: 1690 // switch (IV) { 1691 // case 0: 1692 // <SectionStmt[0]>; 1693 // break; 1694 // ... 1695 // case <NumSection> - 1: 1696 // <SectionStmt[<NumSection> - 1]>; 1697 // break; 1698 // } 1699 // .omp.sections.exit: 1700 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 1701 auto *SwitchStmt = CGF.Builder.CreateSwitch( 1702 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB, 1703 CS->size()); 1704 unsigned CaseNumber = 0; 1705 for (auto *SubStmt : CS->children()) { 1706 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 1707 CGF.EmitBlock(CaseBB); 1708 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 1709 CGF.EmitStmt(SubStmt); 1710 CGF.EmitBranch(ExitBB); 1711 ++CaseNumber; 1712 } 1713 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 1714 }; 1715 1716 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 1717 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 1718 // Emit implicit barrier to synchronize threads and avoid data races on 1719 // initialization of firstprivate variables. 
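// A plain barrier is enough here, hence EmitChecks=false and
// ForceSimpleCall=true below: the threads only need to rendezvous once
// after the firstprivate copy-in, and no cancellation check is required.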
1720 CGF.CGM.getOpenMPRuntime().emitBarrierCall( 1721 CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false, 1722 /*ForceSimpleCall=*/true); 1723 } 1724 CGF.EmitOMPPrivateClause(S, LoopScope); 1725 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 1726 CGF.EmitOMPReductionClauseInit(S, LoopScope); 1727 (void)LoopScope.Privatize(); 1728 1729 // Emit static non-chunked loop. 1730 CGF.CGM.getOpenMPRuntime().emitForStaticInit( 1731 CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32, 1732 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), 1733 LB.getAddress(), UB.getAddress(), ST.getAddress()); 1734 // UB = min(UB, GlobalUB); 1735 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart()); 1736 auto *MinUBGlobalUB = CGF.Builder.CreateSelect( 1737 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 1738 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 1739 // IV = LB; 1740 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV); 1741 // while (idx <= UB) { BODY; ++idx; } 1742 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen, 1743 [](CodeGenFunction &) {}); 1744 // Tell the runtime we are done. 1745 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart()); 1746 CGF.EmitOMPReductionClauseFinal(S); 1747 1748 // Emit final copy of the lastprivate variables if IsLastIter != 0. 1749 if (HasLastprivates) 1750 CGF.EmitOMPLastprivateClauseFinal( 1751 S, CGF.Builder.CreateIsNotNull( 1752 CGF.EmitLoadOfScalar(IL, S.getLocStart()))); 1753 }; 1754 1755 bool HasCancel = false; 1756 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S)) 1757 HasCancel = OSD->hasCancel(); 1758 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S)) 1759 HasCancel = OPSD->hasCancel(); 1760 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen, 1761 HasCancel); 1762 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 1763 // clause. Otherwise the barrier will be generated by the codegen for the 1764 // directive. 1765 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) { 1766 // Emit implicit barrier to synchronize threads and avoid data races on 1767 // initialization of firstprivate variables. 1768 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), 1769 OMPD_unknown); 1770 } 1771 return OMPD_sections; 1772 } 1773 // If only one section is found - no need to generate loop, emit as a single 1774 // region. 1775 bool HasFirstprivates; 1776 // No need to generate reductions for sections with single section region, we 1777 // can use original shared variables for all operations. 1778 bool HasReductions = S.hasClausesOfKind<OMPReductionClause>(); 1779 // No need to generate lastprivates for sections with single section region, 1780 // we can use original shared variable for all calculations with barrier at 1781 // the end of the sections. 
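// E.g. '#pragma omp sections' with a single '#pragma omp section' (or a
// non-compound body) takes this path and is emitted as a 'single'-style
// region below, which is why OMPD_single is returned to the caller.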
1782 bool HasLastprivates = S.hasClausesOfKind<OMPLastprivateClause>();
1783 auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
1784 CodeGenFunction::OMPPrivateScope SingleScope(CGF);
1785 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
1786 CGF.EmitOMPPrivateClause(S, SingleScope);
1787 (void)SingleScope.Privatize();
1788
1789 CGF.EmitStmt(Stmt);
1790 };
1791 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
1792 llvm::None, llvm::None, llvm::None,
1793 llvm::None);
1794 // Emit barrier for firstprivates, lastprivates or reductions only if
1795 // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
1796 // generated by the codegen for the directive.
1797 if ((HasFirstprivates || HasLastprivates || HasReductions) &&
1798 S.getSingleClause<OMPNowaitClause>()) {
1799 // Emit implicit barrier to synchronize threads and avoid data races on
1800 // initialization of firstprivate variables.
1801 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown,
1802 /*EmitChecks=*/false,
1803 /*ForceSimpleCall=*/true);
1804 }
1805 return OMPD_single;
1806 }
1807
1808 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
1809 LexicalScope Scope(*this, S.getSourceRange());
1810 OpenMPDirectiveKind EmittedAs = EmitSections(S);
1811 // Emit an implicit barrier at the end.
1812 if (!S.getSingleClause<OMPNowaitClause>()) {
1813 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
1814 }
1815 }
1816
1817 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
1818 LexicalScope Scope(*this, S.getSourceRange());
1819 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
1820 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
1821 };
1822 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen,
1823 S.hasCancel());
1824 }
1825
1826 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
1827 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
1828 llvm::SmallVector<const Expr *, 8> DestExprs;
1829 llvm::SmallVector<const Expr *, 8> SrcExprs;
1830 llvm::SmallVector<const Expr *, 8> AssignmentOps;
1831 // Check if there are any 'copyprivate' clauses associated with this
1832 // 'single' construct.
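// For illustration: in '#pragma omp single copyprivate(a)' the thread
// executing the region broadcasts its value of 'a' to the corresponding
// copies of every other thread in the team before they leave the
// construct.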
1834 // Build a list of copyprivate variables along with helper expressions 1835 // (<source>, <destination>, <destination>=<source> expressions) 1836 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 1837 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 1838 DestExprs.append(C->destination_exprs().begin(), 1839 C->destination_exprs().end()); 1840 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 1841 AssignmentOps.append(C->assignment_ops().begin(), 1842 C->assignment_ops().end()); 1843 } 1844 LexicalScope Scope(*this, S.getSourceRange()); 1845 // Emit code for 'single' region along with 'copyprivate' clauses 1846 bool HasFirstprivates; 1847 auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) { 1848 CodeGenFunction::OMPPrivateScope SingleScope(CGF); 1849 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); 1850 CGF.EmitOMPPrivateClause(S, SingleScope); 1851 (void)SingleScope.Privatize(); 1852 1853 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1854 }; 1855 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), 1856 CopyprivateVars, DestExprs, SrcExprs, 1857 AssignmentOps); 1858 // Emit an implicit barrier at the end (to avoid data race on firstprivate 1859 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 1860 if ((!S.getSingleClause<OMPNowaitClause>() || HasFirstprivates) && 1861 CopyprivateVars.empty()) { 1862 CGM.getOpenMPRuntime().emitBarrierCall( 1863 *this, S.getLocStart(), 1864 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 1865 } 1866 } 1867 1868 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 1869 LexicalScope Scope(*this, S.getSourceRange()); 1870 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1871 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1872 }; 1873 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart()); 1874 } 1875 1876 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 1877 LexicalScope Scope(*this, S.getSourceRange()); 1878 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1879 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1880 }; 1881 Expr *Hint = nullptr; 1882 if (auto *HintClause = S.getSingleClause<OMPHintClause>()) 1883 Hint = HintClause->getHint(); 1884 CGM.getOpenMPRuntime().emitCriticalRegion(*this, 1885 S.getDirectiveName().getAsString(), 1886 CodeGen, S.getLocStart(), Hint); 1887 } 1888 1889 void CodeGenFunction::EmitOMPParallelForDirective( 1890 const OMPParallelForDirective &S) { 1891 // Emit directive as a combined directive that consists of two implicit 1892 // directives: 'parallel' with 'for' directive. 1893 LexicalScope Scope(*this, S.getSourceRange()); 1894 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true); 1895 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1896 CGF.EmitOMPWorksharingLoop(S); 1897 }; 1898 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen); 1899 } 1900 1901 void CodeGenFunction::EmitOMPParallelForSimdDirective( 1902 const OMPParallelForSimdDirective &S) { 1903 // Emit directive as a combined directive that consists of two implicit 1904 // directives: 'parallel' with 'for' directive. 
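// Roughly, '#pragma omp parallel for simd' behaves as a '#pragma omp
// parallel' region whose body is a '#pragma omp for simd' worksharing
// loop, which is what the CodeGen lambda below emits.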
1905 LexicalScope Scope(*this, S.getSourceRange()); 1906 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true); 1907 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1908 CGF.EmitOMPWorksharingLoop(S); 1909 }; 1910 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen); 1911 } 1912 1913 void CodeGenFunction::EmitOMPParallelSectionsDirective( 1914 const OMPParallelSectionsDirective &S) { 1915 // Emit directive as a combined directive that consists of two implicit 1916 // directives: 'parallel' with 'sections' directive. 1917 LexicalScope Scope(*this, S.getSourceRange()); 1918 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1919 (void)CGF.EmitSections(S); 1920 }; 1921 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen); 1922 } 1923 1924 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 1925 // Emit outlined function for task construct. 1926 LexicalScope Scope(*this, S.getSourceRange()); 1927 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 1928 auto CapturedStruct = GenerateCapturedStmtArgument(*CS); 1929 auto *I = CS->getCapturedDecl()->param_begin(); 1930 auto *PartId = std::next(I); 1931 // The first function argument for tasks is a thread id, the second one is a 1932 // part id (0 for tied tasks, >=0 for untied task). 1933 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 1934 // Get list of private variables. 1935 llvm::SmallVector<const Expr *, 8> PrivateVars; 1936 llvm::SmallVector<const Expr *, 8> PrivateCopies; 1937 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 1938 auto IRef = C->varlist_begin(); 1939 for (auto *IInit : C->private_copies()) { 1940 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1941 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 1942 PrivateVars.push_back(*IRef); 1943 PrivateCopies.push_back(IInit); 1944 } 1945 ++IRef; 1946 } 1947 } 1948 EmittedAsPrivate.clear(); 1949 // Get list of firstprivate variables. 1950 llvm::SmallVector<const Expr *, 8> FirstprivateVars; 1951 llvm::SmallVector<const Expr *, 8> FirstprivateCopies; 1952 llvm::SmallVector<const Expr *, 8> FirstprivateInits; 1953 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1954 auto IRef = C->varlist_begin(); 1955 auto IElemInitRef = C->inits().begin(); 1956 for (auto *IInit : C->private_copies()) { 1957 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1958 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 1959 FirstprivateVars.push_back(*IRef); 1960 FirstprivateCopies.push_back(IInit); 1961 FirstprivateInits.push_back(*IElemInitRef); 1962 } 1963 ++IRef, ++IElemInitRef; 1964 } 1965 } 1966 // Build list of dependences. 1967 llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8> 1968 Dependences; 1969 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 1970 for (auto *IRef : C->varlists()) { 1971 Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef)); 1972 } 1973 } 1974 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars]( 1975 CodeGenFunction &CGF) { 1976 // Set proper addresses for generated private copies. 1977 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt()); 1978 OMPPrivateScope Scope(CGF); 1979 if (!PrivateVars.empty() || !FirstprivateVars.empty()) { 1980 auto *CopyFn = CGF.Builder.CreateLoad( 1981 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3))); 1982 auto *PrivatesPtr = CGF.Builder.CreateLoad( 1983 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2))); 1984 // Map privates. 
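// Sketch of the call emitted below (names illustrative):
//   copy_fn(privates, &.priv.a.addr, &.priv.b.addr, ...);
// The task's copy function fills each pointer temporary with the address
// of the matching private copy; those addresses are then loaded and
// registered in the private scope.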
1985 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> 1986 PrivatePtrs; 1987 llvm::SmallVector<llvm::Value *, 16> CallArgs; 1988 CallArgs.push_back(PrivatesPtr); 1989 for (auto *E : PrivateVars) { 1990 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1991 Address PrivatePtr = 1992 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType())); 1993 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 1994 CallArgs.push_back(PrivatePtr.getPointer()); 1995 } 1996 for (auto *E : FirstprivateVars) { 1997 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1998 Address PrivatePtr = 1999 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType())); 2000 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 2001 CallArgs.push_back(PrivatePtr.getPointer()); 2002 } 2003 CGF.EmitRuntimeCall(CopyFn, CallArgs); 2004 for (auto &&Pair : PrivatePtrs) { 2005 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 2006 CGF.getContext().getDeclAlign(Pair.first)); 2007 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 2008 } 2009 } 2010 (void)Scope.Privatize(); 2011 if (*PartId) { 2012 // TODO: emit code for untied tasks. 2013 } 2014 CGF.EmitStmt(CS->getCapturedStmt()); 2015 }; 2016 auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 2017 S, *I, OMPD_task, CodeGen); 2018 // Check if we should emit tied or untied task. 2019 bool Tied = !S.getSingleClause<OMPUntiedClause>(); 2020 // Check if the task is final 2021 llvm::PointerIntPair<llvm::Value *, 1, bool> Final; 2022 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 2023 // If the condition constant folds and can be elided, try to avoid emitting 2024 // the condition and the dead arm of the if/else. 2025 auto *Cond = Clause->getCondition(); 2026 bool CondConstant; 2027 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 2028 Final.setInt(CondConstant); 2029 else 2030 Final.setPointer(EvaluateExprAsBool(Cond)); 2031 } else { 2032 // By default the task is not final. 
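// (OpenMP 4.5, 2.9.1: a task is final only if a 'final' clause is present
// and its condition evaluates to true.)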
2033 Final.setInt(/*IntVal=*/false); 2034 } 2035 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 2036 const Expr *IfCond = nullptr; 2037 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 2038 if (C->getNameModifier() == OMPD_unknown || 2039 C->getNameModifier() == OMPD_task) { 2040 IfCond = C->getCondition(); 2041 break; 2042 } 2043 } 2044 CGM.getOpenMPRuntime().emitTaskCall( 2045 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy, 2046 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars, 2047 FirstprivateCopies, FirstprivateInits, Dependences); 2048 } 2049 2050 void CodeGenFunction::EmitOMPTaskyieldDirective( 2051 const OMPTaskyieldDirective &S) { 2052 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart()); 2053 } 2054 2055 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 2056 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier); 2057 } 2058 2059 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 2060 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart()); 2061 } 2062 2063 void CodeGenFunction::EmitOMPTaskgroupDirective( 2064 const OMPTaskgroupDirective &S) { 2065 LexicalScope Scope(*this, S.getSourceRange()); 2066 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 2067 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2068 }; 2069 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart()); 2070 } 2071 2072 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 2073 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> { 2074 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) { 2075 return llvm::makeArrayRef(FlushClause->varlist_begin(), 2076 FlushClause->varlist_end()); 2077 } 2078 return llvm::None; 2079 }(), S.getLocStart()); 2080 } 2081 2082 void CodeGenFunction::EmitOMPDistributeDirective( 2083 const OMPDistributeDirective &S) { 2084 llvm_unreachable("CodeGen for 'omp distribute' is not supported yet."); 2085 } 2086 2087 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM, 2088 const CapturedStmt *S) { 2089 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 2090 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo; 2091 CGF.CapturedStmtInfo = &CapStmtInfo; 2092 auto *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S); 2093 Fn->addFnAttr(llvm::Attribute::NoInline); 2094 return Fn; 2095 } 2096 2097 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 2098 if (!S.getAssociatedStmt()) 2099 return; 2100 LexicalScope Scope(*this, S.getSourceRange()); 2101 auto *C = S.getSingleClause<OMPSIMDClause>(); 2102 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF) { 2103 if (C) { 2104 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 2105 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 2106 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars); 2107 auto *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS); 2108 CGF.EmitNounwindRuntimeCall(OutlinedFn, CapturedVars); 2109 } else { 2110 CGF.EmitStmt( 2111 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 2112 } 2113 }; 2114 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C); 2115 } 2116 2117 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 2118 QualType SrcType, QualType DestType, 2119 SourceLocation Loc) { 2120 assert(CGF.hasScalarEvaluationKind(DestType) && 2121 "DestType must have scalar 
evaluation kind."); 2122 assert(!Val.isAggregate() && "Must be a scalar or complex."); 2123 return Val.isScalar() 2124 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType, 2125 Loc) 2126 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType, 2127 DestType, Loc); 2128 } 2129 2130 static CodeGenFunction::ComplexPairTy 2131 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 2132 QualType DestType, SourceLocation Loc) { 2133 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 2134 "DestType must have complex evaluation kind."); 2135 CodeGenFunction::ComplexPairTy ComplexVal; 2136 if (Val.isScalar()) { 2137 // Convert the input element to the element type of the complex. 2138 auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); 2139 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 2140 DestElementType, Loc); 2141 ComplexVal = CodeGenFunction::ComplexPairTy( 2142 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 2143 } else { 2144 assert(Val.isComplex() && "Must be a scalar or complex."); 2145 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 2146 auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); 2147 ComplexVal.first = CGF.EmitScalarConversion( 2148 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 2149 ComplexVal.second = CGF.EmitScalarConversion( 2150 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 2151 } 2152 return ComplexVal; 2153 } 2154 2155 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst, 2156 LValue LVal, RValue RVal) { 2157 if (LVal.isGlobalReg()) { 2158 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 2159 } else { 2160 CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent 2161 : llvm::Monotonic, 2162 LVal.isVolatile(), /*IsInit=*/false); 2163 } 2164 } 2165 2166 static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal, 2167 QualType RValTy, SourceLocation Loc) { 2168 switch (CGF.getEvaluationKind(LVal.getType())) { 2169 case TEK_Scalar: 2170 CGF.EmitStoreThroughLValue(RValue::get(convertToScalarValue( 2171 CGF, RVal, RValTy, LVal.getType(), Loc)), 2172 LVal); 2173 break; 2174 case TEK_Complex: 2175 CGF.EmitStoreOfComplex( 2176 convertToComplexValue(CGF, RVal, RValTy, LVal.getType(), Loc), LVal, 2177 /*isInit=*/false); 2178 break; 2179 case TEK_Aggregate: 2180 llvm_unreachable("Must be a scalar or complex."); 2181 } 2182 } 2183 2184 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst, 2185 const Expr *X, const Expr *V, 2186 SourceLocation Loc) { 2187 // v = x; 2188 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 2189 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 2190 LValue XLValue = CGF.EmitLValue(X); 2191 LValue VLValue = CGF.EmitLValue(V); 2192 RValue Res = XLValue.isGlobalReg() 2193 ? CGF.EmitLoadOfLValue(XLValue, Loc) 2194 : CGF.EmitAtomicLoad(XLValue, Loc, 2195 IsSeqCst ? llvm::SequentiallyConsistent 2196 : llvm::Monotonic, 2197 XLValue.isVolatile()); 2198 // OpenMP, 2.12.6, atomic Construct 2199 // Any atomic construct with a seq_cst clause forces the atomically 2200 // performed operation to include an implicit flush operation without a 2201 // list. 
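// A flush without a list applies to all thread-visible memory, which is
// why the runtime call below passes no variable arguments.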
2202 if (IsSeqCst)
2203 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2204 emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType(), Loc);
2205 }
2206
2207 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
2208 const Expr *X, const Expr *E,
2209 SourceLocation Loc) {
2210 // x = expr;
2211 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
2212 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
2213 // OpenMP, 2.12.6, atomic Construct
2214 // Any atomic construct with a seq_cst clause forces the atomically
2215 // performed operation to include an implicit flush operation without a
2216 // list.
2217 if (IsSeqCst)
2218 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2219 }
2220
2221 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
2222 RValue Update,
2223 BinaryOperatorKind BO,
2224 llvm::AtomicOrdering AO,
2225 bool IsXLHSInRHSPart) {
2226 auto &Context = CGF.CGM.getContext();
2227 // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
2228 // for the 'x' expression is simple, and atomics are supported for the given
2229 // type on the target platform.
2230 if (BO == BO_Comma || !Update.isScalar() ||
2231 !Update.getScalarVal()->getType()->isIntegerTy() ||
2232 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
2233 (Update.getScalarVal()->getType() !=
2234 X.getAddress().getElementType())) ||
2235 !X.getAddress().getElementType()->isIntegerTy() ||
2236 !Context.getTargetInfo().hasBuiltinAtomic(
2237 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
2238 return std::make_pair(false, RValue::get(nullptr));
2239
2240 llvm::AtomicRMWInst::BinOp RMWOp;
2241 switch (BO) {
2242 case BO_Add:
2243 RMWOp = llvm::AtomicRMWInst::Add;
2244 break;
2245 case BO_Sub:
2246 if (!IsXLHSInRHSPart)
2247 return std::make_pair(false, RValue::get(nullptr));
2248 RMWOp = llvm::AtomicRMWInst::Sub;
2249 break;
2250 case BO_And:
2251 RMWOp = llvm::AtomicRMWInst::And;
2252 break;
2253 case BO_Or:
2254 RMWOp = llvm::AtomicRMWInst::Or;
2255 break;
2256 case BO_Xor:
2257 RMWOp = llvm::AtomicRMWInst::Xor;
2258 break;
2259 case BO_LT:
2260 RMWOp = X.getType()->hasSignedIntegerRepresentation()
2261 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
2262 : llvm::AtomicRMWInst::Max)
2263 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
2264 : llvm::AtomicRMWInst::UMax);
2265 break;
2266 case BO_GT:
2267 RMWOp = X.getType()->hasSignedIntegerRepresentation()
2268 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
2269 : llvm::AtomicRMWInst::Min)
2270 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
2271 : llvm::AtomicRMWInst::UMin);
2272 break;
2273 case BO_Assign:
2274 RMWOp = llvm::AtomicRMWInst::Xchg;
2275 break;
2276 case BO_Mul:
2277 case BO_Div:
2278 case BO_Rem:
2279 case BO_Shl:
2280 case BO_Shr:
2281 case BO_LAnd:
2282 case BO_LOr:
2283 return std::make_pair(false, RValue::get(nullptr));
2284 case BO_PtrMemD:
2285 case BO_PtrMemI:
2286 case BO_LE:
2287 case BO_GE:
2288 case BO_EQ:
2289 case BO_NE:
2290 case BO_AddAssign:
2291 case BO_SubAssign:
2292 case BO_AndAssign:
2293 case BO_OrAssign:
2294 case BO_XorAssign:
2295 case BO_MulAssign:
2296 case BO_DivAssign:
2297 case BO_RemAssign:
2298 case BO_ShlAssign:
2299 case BO_ShrAssign:
2300 case BO_Comma:
2301 llvm_unreachable("Unsupported atomic update operation");
2302 }
2303 auto *UpdateVal = Update.getScalarVal();
2304 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
2305 UpdateVal = CGF.Builder.CreateIntCast(
2306 IC, X.getAddress().getElementType(),
2307 X.getType()->hasSignedIntegerRepresentation());
2308 }
2309 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
2310 return std::make_pair(true, RValue::get(Res));
2311 }
2312
2313 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
2314 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
2315 llvm::AtomicOrdering AO, SourceLocation Loc,
2316 const llvm::function_ref<RValue(RValue)> &CommonGen) {
2317 // Update expressions are allowed to have the following forms:
2318 // x binop= expr; -> xrval binop expr;
2319 // x++, ++x -> xrval + 1;
2320 // x--, --x -> xrval - 1;
2321 // x = x binop expr; -> xrval binop expr;
2322 // x = expr Op x; -> expr binop xrval;
2323 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
2324 if (!Res.first) {
2325 if (X.isGlobalReg()) {
2326 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
2327 // 'xrval'.
2328 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
2329 } else {
2330 // Perform a compare-and-swap procedure.
2331 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
2332 }
2333 }
2334 return Res;
2335 }
2336
2337 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
2338 const Expr *X, const Expr *E,
2339 const Expr *UE, bool IsXLHSInRHSPart,
2340 SourceLocation Loc) {
2341 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
2342 "Update expr in 'atomic update' must be a binary operator.");
2343 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
2344 // Update expressions are allowed to have the following forms:
2345 // x binop= expr; -> xrval binop expr;
2346 // x++, ++x -> xrval + 1;
2347 // x--, --x -> xrval - 1;
2348 // x = x binop expr; -> xrval binop expr;
2349 // x = expr Op x; -> expr binop xrval;
2350 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
2351 LValue XLValue = CGF.EmitLValue(X);
2352 RValue ExprRValue = CGF.EmitAnyExpr(E);
2353 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
2354 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
2355 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
2356 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
2357 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
2358 auto Gen =
2359 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
2360 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
2361 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
2362 return CGF.EmitAnyExpr(UE);
2363 };
2364 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
2365 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
2366 // OpenMP, 2.12.6, atomic Construct
2367 // Any atomic construct with a seq_cst clause forces the atomically
2368 // performed operation to include an implicit flush operation without a
2369 // list.
2370 if (IsSeqCst)
2371 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
2372 }
2373
2374 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
2375 QualType SourceType, QualType ResType,
2376 SourceLocation Loc) {
2377 switch (CGF.getEvaluationKind(ResType)) {
2378 case TEK_Scalar:
2379 return RValue::get(
2380 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
2381 case TEK_Complex: {
2382 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
2383 return RValue::getComplex(Res.first, Res.second);
2384 }
2385 case TEK_Aggregate:
2386 break;
2387 }
2388 llvm_unreachable("Must be a scalar or complex.");
2389 }
2390
2391 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
2392 bool IsPostfixUpdate, const Expr *V,
2393 const Expr *X, const Expr *E,
2394 const Expr *UE, bool IsXLHSInRHSPart,
2395 SourceLocation Loc) {
2396 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
2397 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
2398 RValue NewVVal;
2399 LValue VLValue = CGF.EmitLValue(V);
2400 LValue XLValue = CGF.EmitLValue(X);
2401 RValue ExprRValue = CGF.EmitAnyExpr(E);
2402 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
2403 QualType NewVValType;
2404 if (UE) {
2405 // 'x' is updated with some additional value.
2406 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
2407 "Update expr in 'atomic capture' must be a binary operator.");
2408 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
2409 // Update expressions are allowed to have the following forms:
2410 // x binop= expr; -> xrval binop expr;
2411 // x++, ++x -> xrval + 1;
2412 // x--, --x -> xrval - 1;
2413 // x = x binop expr; -> xrval binop expr;
2414 // x = expr Op x; -> expr binop xrval;
2415 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
2416 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
2417 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
2418 NewVValType = XRValExpr->getType();
2419 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
2420 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
2421 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
2422 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
2423 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
2424 RValue Res = CGF.EmitAnyExpr(UE);
2425 NewVVal = IsPostfixUpdate ? XRValue : Res;
2426 return Res;
2427 };
2428 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
2429 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
2430 if (Res.first) {
2431 // 'atomicrmw' instruction was generated.
2432 if (IsPostfixUpdate) {
2433 // Use old value from 'atomicrmw'.
2434 NewVVal = Res.second;
2435 } else {
2436 // 'atomicrmw' does not provide the new value, so evaluate it using the
2437 // old value of 'x'.
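// E.g. for '{x = x + expr; v = x;}' the captured value is the updated
// one; atomicrmw only returns the old value, so the update expression is
// re-evaluated here with the old value substituted for 'x'.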
2438 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 2439 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 2440 NewVVal = CGF.EmitAnyExpr(UE); 2441 } 2442 } 2443 } else { 2444 // 'x' is simply rewritten with some 'expr'. 2445 NewVValType = X->getType().getNonReferenceType(); 2446 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 2447 X->getType().getNonReferenceType(), Loc); 2448 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue { 2449 NewVVal = XRValue; 2450 return ExprRValue; 2451 }; 2452 // Try to perform atomicrmw xchg, otherwise simple exchange. 2453 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 2454 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 2455 Loc, Gen); 2456 if (Res.first) { 2457 // 'atomicrmw' instruction was generated. 2458 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 2459 } 2460 } 2461 // Emit post-update store to 'v' of old/new 'x' value. 2462 emitSimpleStore(CGF, VLValue, NewVVal, NewVValType, Loc); 2463 // OpenMP, 2.12.6, atomic Construct 2464 // Any atomic construct with a seq_cst clause forces the atomically 2465 // performed operation to include an implicit flush operation without a 2466 // list. 2467 if (IsSeqCst) 2468 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 2469 } 2470 2471 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 2472 bool IsSeqCst, bool IsPostfixUpdate, 2473 const Expr *X, const Expr *V, const Expr *E, 2474 const Expr *UE, bool IsXLHSInRHSPart, 2475 SourceLocation Loc) { 2476 switch (Kind) { 2477 case OMPC_read: 2478 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc); 2479 break; 2480 case OMPC_write: 2481 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc); 2482 break; 2483 case OMPC_unknown: 2484 case OMPC_update: 2485 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc); 2486 break; 2487 case OMPC_capture: 2488 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE, 2489 IsXLHSInRHSPart, Loc); 2490 break; 2491 case OMPC_if: 2492 case OMPC_final: 2493 case OMPC_num_threads: 2494 case OMPC_private: 2495 case OMPC_firstprivate: 2496 case OMPC_lastprivate: 2497 case OMPC_reduction: 2498 case OMPC_safelen: 2499 case OMPC_simdlen: 2500 case OMPC_collapse: 2501 case OMPC_default: 2502 case OMPC_seq_cst: 2503 case OMPC_shared: 2504 case OMPC_linear: 2505 case OMPC_aligned: 2506 case OMPC_copyin: 2507 case OMPC_copyprivate: 2508 case OMPC_flush: 2509 case OMPC_proc_bind: 2510 case OMPC_schedule: 2511 case OMPC_ordered: 2512 case OMPC_nowait: 2513 case OMPC_untied: 2514 case OMPC_threadprivate: 2515 case OMPC_depend: 2516 case OMPC_mergeable: 2517 case OMPC_device: 2518 case OMPC_threads: 2519 case OMPC_simd: 2520 case OMPC_map: 2521 case OMPC_num_teams: 2522 case OMPC_thread_limit: 2523 case OMPC_priority: 2524 case OMPC_grainsize: 2525 case OMPC_nogroup: 2526 case OMPC_num_tasks: 2527 case OMPC_hint: 2528 case OMPC_dist_schedule: 2529 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 2530 } 2531 } 2532 2533 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 2534 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>(); 2535 OpenMPClauseKind Kind = OMPC_unknown; 2536 for (auto *C : S.clauses()) { 2537 // Find first clause (skip seq_cst clause, if it is first). 
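// E.g. '#pragma omp atomic seq_cst capture' selects OMPC_capture here; a
// bare '#pragma omp atomic' leaves Kind as OMPC_unknown, which is handled
// as an update in EmitOMPAtomicExpr.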
2538 if (C->getClauseKind() != OMPC_seq_cst) { 2539 Kind = C->getClauseKind(); 2540 break; 2541 } 2542 } 2543 2544 const auto *CS = 2545 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); 2546 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) { 2547 enterFullExpression(EWC); 2548 } 2549 // Processing for statements under 'atomic capture'. 2550 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) { 2551 for (const auto *C : Compound->body()) { 2552 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) { 2553 enterFullExpression(EWC); 2554 } 2555 } 2556 } 2557 2558 LexicalScope Scope(*this, S.getSourceRange()); 2559 auto &&CodeGen = [&S, Kind, IsSeqCst, CS](CodeGenFunction &CGF) { 2560 CGF.EmitStopPoint(CS); 2561 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(), 2562 S.getV(), S.getExpr(), S.getUpdateExpr(), 2563 S.isXLHSInRHSPart(), S.getLocStart()); 2564 }; 2565 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 2566 } 2567 2568 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) { 2569 LexicalScope Scope(*this, S.getSourceRange()); 2570 const CapturedStmt &CS = *cast<CapturedStmt>(S.getAssociatedStmt()); 2571 2572 llvm::SmallVector<llvm::Value *, 16> CapturedVars; 2573 GenerateOpenMPCapturedVars(CS, CapturedVars); 2574 2575 llvm::Function *Fn = nullptr; 2576 llvm::Constant *FnID = nullptr; 2577 2578 // Check if we have any if clause associated with the directive. 2579 const Expr *IfCond = nullptr; 2580 2581 if (auto *C = S.getSingleClause<OMPIfClause>()) { 2582 IfCond = C->getCondition(); 2583 } 2584 2585 // Check if we have any device clause associated with the directive. 2586 const Expr *Device = nullptr; 2587 if (auto *C = S.getSingleClause<OMPDeviceClause>()) { 2588 Device = C->getDevice(); 2589 } 2590 2591 // Check if we have an if clause whose conditional always evaluates to false 2592 // or if we do not have any targets specified. If so the target region is not 2593 // an offload entry point. 2594 bool IsOffloadEntry = true; 2595 if (IfCond) { 2596 bool Val; 2597 if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val) 2598 IsOffloadEntry = false; 2599 } 2600 if (CGM.getLangOpts().OMPTargetTriples.empty()) 2601 IsOffloadEntry = false; 2602 2603 assert(CurFuncDecl && "No parent declaration for target region!"); 2604 StringRef ParentName; 2605 // In case we have Ctors/Dtors we use the complete type variant to produce 2606 // the mangling of the device outlined kernel. 
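// (With the Itanium ABI these are the C1/D1 symbols; sticking to one
// fixed variant keeps host and device entry names consistent, assuming
// that is the intent here.)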
2607 if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
2608 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
2609 else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
2610 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
2611 else
2612 ParentName =
2613 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));
2614
2615 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
2616 IsOffloadEntry);
2617
2618 CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
2619 CapturedVars);
2620 }
2621
2622 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
2623 llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
2624 }
2625
2626 void CodeGenFunction::EmitOMPCancellationPointDirective(
2627 const OMPCancellationPointDirective &S) {
2628 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
2629 S.getCancelRegion());
2630 }
2631
2632 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
2633 const Expr *IfCond = nullptr;
2634 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2635 if (C->getNameModifier() == OMPD_unknown ||
2636 C->getNameModifier() == OMPD_cancel) {
2637 IfCond = C->getCondition();
2638 break;
2639 }
2640 }
2641 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
2642 S.getCancelRegion());
2643 }
2644
2645 CodeGenFunction::JumpDest
2646 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
2647 if (Kind == OMPD_parallel || Kind == OMPD_task)
2648 return ReturnBlock;
2649 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
2650 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for);
2651 return BreakContinueStack.back().BreakBlock;
2652 }
2653
2654 // Generate the instructions for '#pragma omp target data' directive.
2655 void CodeGenFunction::EmitOMPTargetDataDirective(
2656 const OMPTargetDataDirective &S) {
2657 // For now, emit only the code inside the construct.
2658 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2659 CGM.getOpenMPRuntime().emitInlinedDirective(
2660 *this, OMPD_target_data,
2661 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
2662 }
2663
2664 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
2665 const OMPTargetEnterDataDirective &S) {
2666 // TODO: codegen for target enter data.
2667 }
2668
2669 void CodeGenFunction::EmitOMPTargetExitDataDirective(
2670 const OMPTargetExitDataDirective &S) {
2671 // TODO: codegen for target exit data.
2672 }
2673
2674 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
2675 // For now, emit only the code inside the construct.
2676 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2677 CGM.getOpenMPRuntime().emitInlinedDirective(
2678 *this, OMPD_taskloop,
2679 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
2680 }
2681
2682 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
2683 const OMPTaskLoopSimdDirective &S) {
2684 // For now, emit only the code inside the construct.
2685 auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
2686 CGM.getOpenMPRuntime().emitInlinedDirective(
2687 *this, OMPD_taskloop_simd,
2688 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
2689 }
2690
2691