//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      CapturedVars.push_back(VLASizeMap[VAT->getSizeExpr()]);
    } else if (CurCap->capturesThis())
      CapturedVars.push_back(CXXThisValue);
    else
      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
  }
}

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = S.captures().begin();
  for (auto *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;
    if (I->capturesVariable()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis())
      II = &getContext().Idents.get("this");
    else {
      assert(I->capturesVariableArrayType());
      II = &getContext().Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getContext().getVariableArrayDecayedType(ArgType);
    Args.push_back(ImplicitParamDecl::Create(getContext(), nullptr,
                                             FD->getLocation(), II, ArgType));
    ++I;
  }
  Args.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
                                                    /*IsVariadic=*/false);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F = llvm::Function::Create(
      FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
      CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
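  // After starting the function, rebind each captured field to local state:
  // VLA size expressions are recorded in VLASizeMap, captured variables are
  // bound to the addresses of the corresponding arguments, and a captured
  // 'this' is reloaded into CXXThisValue.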
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getLocStart());
  unsigned Cnt = CD->getContextParamPosition();
  I = S.captures().begin();
  for (auto *FD : RD->fields()) {
    LValue ArgLVal =
        MakeAddrLValue(GetAddrOfLocalVar(Args[Cnt]), Args[Cnt]->getType(),
                       AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(ArgLVal, SourceLocation()).getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    } else if (I->capturesVariable()) {
      auto *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress();
      if (!VarTy->isReferenceType()) {
        ArgAddr = EmitLoadOfReference(
            ArgAddr, ArgLVal.getType()->castAs<ReferenceType>());
      }
      setAddrOfLocalVar(
          Var, Address(ArgAddr.getPointer(), getContext().getDeclAlign(Var)));
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue =
          EmitLoadOfLValue(ArgLVal, Args[Cnt]->getLocation()).getScalarVal();
    }
    ++Cnt, ++I;
  }

  PGO.assignRegionCounters(CD, F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  auto SrcBegin = SrcAddr.getPointer();
  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI,
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element-by-element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // We are working with a single array element, so remap the
            // destination and source variables to the corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, [DestElement]() -> Address {
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> Address { return SrcElement; });
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> Address { return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> Address { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        Address OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in the current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](Address DestElement,
                                       Address SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for a single element.
                    setAddrOfLocalVar(VDInit, SrcElement);
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
            // Emit private VarDecl with copy init.
            // Remap the temporary VDInit variable to the address of the
            // original variable (for proper handling of captured global
            // variables).
            setAddrOfLocalVar(VDInit, OriginalAddr);
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> Address {
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {

        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
                          VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, no copying is needed.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr.getPointer(),
                                         CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of the copying procedure for the non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> Address {
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> Address {
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  // Emit the following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
      LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
                                      LoopDirective->getUpperBoundVariable())
                                      ->getDecl())
                        ->getAnyInitializer();
      IVExpr = LoopDirective->getIterationVariable();
      IncExpr = LoopDirective->getInc();
      auto IUpdate = LoopDirective->updates().begin();
      for (auto *E : LoopDirective->counters()) {
        auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
        LoopCountersAndUpdates[D] = *IUpdate;
        ++IUpdate;
      }
    }
  }
  {
    llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
    bool FirstLCV = true;
    for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
      auto IRef = C->varlist_begin();
      auto ISrcRef = C->source_exprs().begin();
      auto IDestRef = C->destination_exprs().begin();
      for (auto *AssignOp : C->assignment_ops()) {
        auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
        QualType Type = PrivateVD->getType();
        auto *CanonicalVD = PrivateVD->getCanonicalDecl();
        if (AlreadyEmittedVars.insert(CanonicalVD).second) {
          // If the lastprivate variable is a loop control variable of a
          // loop-based directive, update its value before copying it back to
          // the original variable.
          if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
            if (FirstLCV && LastIterVal) {
              EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                               IVExpr->getType().getQualifiers(),
                               /*IsInitializer=*/false);
              EmitIgnoredExpr(IncExpr);
              FirstLCV = false;
            }
            EmitIgnoredExpr(UpExpr);
          }
          auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
          auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
          // Get the address of the original variable.
          Address OriginalAddr = GetAddrOfLocalVar(DestVD);
          // Get the address of the private variable.
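          // If the private copy is of reference type, it holds a reference to
          // the actual private object; load through it to reach that object.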
          Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
          if (auto RefTy = PrivateVD->getType()->getAs<ReferenceType>())
            PrivateAddr =
                Address(Builder.CreateLoad(PrivateAddr),
                        getNaturalTypeAlignment(RefTy->getPointeeType()));
          EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
        }
        ++IRef;
        ++ISrcRef;
        ++IDestRef;
      }
    }
  }
  if (IsLastIterCond) {
    EmitBlock(DoneBB, /*IsFinished=*/true);
  }
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (auto IRef : C->varlists()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> Address {
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> Address {
            // Emit private VarDecl with reduction init.
            EmitDecl(*PrivateVD);
            return GetAddrOfLocalVar(PrivateVD);
          });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS, ++IRHS;
    }
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    HasAtLeastOneReduction = true;
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit a nowait reduction if the nowait clause is present or the directive
    // is a parallel directive (it always has an implicit barrier).
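    // Note: the trailing boolean arguments presumably select the runtime's
    // nowait reduction and the simplified reduction codegen used for 'simd'.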
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause<OMPNowaitClause>() ||
            isOpenMPParallelDirective(D.getDirectiveKind()) ||
            D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}

static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           OpenMPDirectiveKind InnermostKind,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedVars, IfCond);
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // on initialization of firstprivate variables or on propagation of the
      // master thread's values of threadprivate variables to the local
      // instances of those variables in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit an implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update the counter values for the current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  // TODO: Update lastprivates if the SeparateIter flag is true.
  // This will be implemented in a follow-up OMPLastprivateClause patch, but
  // the result should still be correct without it, as we do not make these
  // variables private yet.
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  // Emit inits for the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (auto Init : C->inits()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      auto *OrigVD = cast<VarDecl>(
          cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      VD->getInit()->getType(), VK_LValue,
                      VD->getInit()->getExprLoc());
      AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
      EmitExprAsInit(&DRE, VD,
                     MakeAddrLValue(Emission.getAllocatedAddress(),
                                    VD->getType()),
                     /*capturedByInit=*/false);
      EmitAutoVarCleanups(Emission);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}

static void emitLinearClauseFinal(CodeGenFunction &CGF,
                                  const OMPLoopDirective &D) {
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (auto F : C->finals()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = CGF.EmitLValue(&DRE).getAddress();
      CodeGenFunction::OMPPrivateScope VarScope(CGF);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> Address { return OrigAddr; });
      (void)VarScope.Privatize();
      CGF.EmitIgnoredExpr(F);
      ++IC;
    }
  }
}

static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (auto E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}

static void emitPrivateLoopCounters(CodeGenFunction &CGF,
                                    CodeGenFunction::OMPPrivateScope &LoopScope,
                                    ArrayRef<Expr *> Counters,
                                    ArrayRef<Expr *> PrivateCounters) {
  auto I = PrivateCounters.begin();
  for (auto *E : Counters) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    Address Addr = Address::invalid();
    (void)LoopScope.addPrivate(PrivateVD, [&]() -> Address {
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD);
      CGF.EmitAutoVarCleanups(VarEmission);
      Addr = VarEmission.getAllocatedAddress();
      return Addr;
    });
    (void)LoopScope.addPrivate(VD, [&]() -> Address { return Addr; });
    ++I;
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    emitPrivateLoopCounters(CGF, PreCondScope, S.counters(),
                            S.private_counters());
    (void)PreCondScope.Privatize();
    // Get initial values of the real counters.
    for (auto I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                      CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (auto *E : C->varlists()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> Address {
        // Emit private VarDecl with copy init.
        CGF.EmitVarDecl(*PrivateVD);
        return CGF.GetAddrOfLocalVar(PrivateVD);
      });
      assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D) {
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel();
  LoopStack.setVectorizeEnable(true);
  emitSimdlenSafelenClause(*this, D);
}

void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
  auto IC = D.counters().begin();
  for (auto F : D.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> Address { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  emitLinearClauseFinal(*this, D);
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iterations count
    // on each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPSimdInit(S);

    emitAlignedClause(CGF, S);
    CGF.EmitOMPLinearClauseInit(S);
    bool HasLastprivateClause;
    {
      OMPPrivateScope LoopScope(CGF);
      emitPrivateLoopCounters(CGF, LoopScope, S.counters(),
                              S.private_counters());
      emitPrivateLinearVars(CGF, S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S, JumpDest());
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      // Emit the final copy of the lastprivate variables at the end of loops.
      if (HasLastprivateClause) {
        CGF.EmitOMPLastprivateClauseFinal(S);
      }
      CGF.EmitOMPReductionClauseFinal(S);
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
                                          const OMPLoopDirective &S,
                                          OMPPrivateScope &LoopScope,
                                          bool Ordered, Address LB,
                                          Address UB, Address ST,
                                          Address IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
    RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind,
                           IVSize, IVSigned, Ordered, UBVal, Chunk);
  } else {
    RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
                         IVSize, IVSigned, Ordered, IL, LB, UB, ST, Chunk);
  }

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in the case of a static schedule, we have already
  // calculated the new LB for the loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without an ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
    LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                           ScheduleKind == OMPC_SCHEDULE_guided) &&
                          !Ordered);
  } else {
    EmitOMPSimdInit(S);
  }

  SourceLocation Loc = S.getLocStart();
  EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                   [&S, LoopExit](CodeGenFunction &CGF) {
                     CGF.EmitOMPLoopBody(S, LoopExit);
                     CGF.EmitStopPoint(&S);
                   },
                   [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
                     if (Ordered) {
                       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
                           CGF, Loc, IVSize, IVSigned);
                     }
                   });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}

/// \brief Emit a helper variable and return the corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  llvm::Value *Chunk = nullptr;
  if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
    ScheduleKind = C->getScheduleKind();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
          const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
          CGF.EmitVarDecl(*ImpVar);
          CGF.EmitStoreThroughLValue(
              CGF.EmitAnyExpr(Ch),
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
                                 ImpVar->getType()));
        } else {
          Ch = ImpRef;
        }
      }
      if (!C->getHelperChunkSize() || !OuterRegion) {
        Chunk = CGF.EmitScalarExpr(Ch);
        Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
                                         S.getIterationVariable()->getType(),
                                         S.getLocStart());
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKind);
}

bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    EmitOMPLinearClauseInit(S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit an implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                               OMPD_unknown);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      emitPrivateLoopCounters(*this, LoopScope, S.counters(),
                              S.private_counters());
      emitPrivateLinearVars(*this, S, LoopScope);
      (void)LoopScope.Privatize();

      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo =
          emitScheduleClause(*this, S, /*OuterRegion=*/false);
      Chunk = ScheduleInfo.first;
      ScheduleKind = ScheduleInfo.second;
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr;
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind())) {
          EmitOMPSimdInit(S);
        }
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk
        // is distributed to each thread. Note that the size of the chunks is
        // unspecified in this case.
        RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind,
                             IVSize, IVSigned, Ordered,
                             IL.getAddress(), LB.getAddress(),
                             UB.getAddress(), ST.getAddress());
        auto LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S, LoopExit](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, LoopExit);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // the runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
      EmitOMPReductionClauseFinal(S);
      // Emit the final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    if (isOpenMPSimdDirective(S.getDirectiveKind())) {
      EmitOMPSimdFinal(S);
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen);

  // Emit an implicit barrier at the end.
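  // Even with a 'nowait' clause, lastprivates require a barrier so that the
  // copy-back to the original variables completes before other threads may
  // read them.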
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}

OpenMPDirectiveKind
CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
  auto *CS = dyn_cast<CompoundStmt>(Stmt);
  if (CS && CS->size() > 1) {
    bool HasLastprivates = false;
    auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
      auto &C = CGF.CGM.getContext();
      auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
      // Emit helper vars inits.
      LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                    CGF.Builder.getInt32(0));
      auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
      LValue UB =
          createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
      LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                    CGF.Builder.getInt32(1));
      LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                    CGF.Builder.getInt32(0));
      // Loop counter.
      LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
      OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
      OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
      // Generate condition for loop.
      BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                          OK_Ordinary, S.getLocStart(),
                          /*fpContractable=*/false);
      // Increment for loop counter.
      UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
                        OK_Ordinary, S.getLocStart());
      auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
        // Iterate through all sections and emit a switch construct:
        // switch (IV) {
        //   case 0:
        //     <SectionStmt[0]>;
        //     break;
        // ...
        //   case <NumSection> - 1:
        //     <SectionStmt[<NumSection> - 1]>;
        //     break;
        // }
        // .omp.sections.exit:
        auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
        auto *SwitchStmt = CGF.Builder.CreateSwitch(
            CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
            CS->size());
        unsigned CaseNumber = 0;
        for (auto *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
        CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
      };

      CodeGenFunction::OMPPrivateScope LoopScope(CGF);
      if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit an implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                   OMPD_unknown);
      }
      CGF.EmitOMPPrivateClause(S, LoopScope);
      HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();

      // Emit static non-chunked loop.
      CGF.CGM.getOpenMPRuntime().emitForStaticInit(
          CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
          /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
          LB.getAddress(), UB.getAddress(), ST.getAddress());
      // UB = min(UB, GlobalUB);
      auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
      auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
          CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
      CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
      // IV = LB;
      CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
      // while (idx <= UB) { BODY; ++idx; }
      CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                           [](CodeGenFunction &) {});
      // Tell the runtime we are done.
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
      CGF.EmitOMPReductionClauseFinal(S);

      // Emit the final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivates)
        CGF.EmitOMPLastprivateClauseFinal(
            S, CGF.Builder.CreateIsNotNull(
                   CGF.EmitLoadOfScalar(IL, S.getLocStart())));
    };

    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen);
    // Emit a barrier for lastprivates only if the 'sections' directive has a
    // 'nowait' clause. Otherwise the barrier will be generated by the codegen
    // for the directive.
    if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // on initialization of firstprivate variables.
      CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                             OMPD_unknown);
    }
    return OMPD_sections;
  }
  // If only one section is found, there is no need to generate a loop; emit
  // it as a single region.
  bool HasFirstprivates;
  // No need to generate reductions for sections with a single section region;
  // we can use the original shared variables for all operations.
  bool HasReductions = S.hasClausesOfKind<OMPReductionClause>();
  // No need to generate lastprivates for sections with a single section
  // region; we can use the original shared variable for all calculations, with
  // a barrier at the end of the sections.
1474 bool HasLastprivates = S.hasClausesOfKind<OMPLastprivateClause>(); 1475 auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) { 1476 CodeGenFunction::OMPPrivateScope SingleScope(CGF); 1477 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); 1478 CGF.EmitOMPPrivateClause(S, SingleScope); 1479 (void)SingleScope.Privatize(); 1480 1481 CGF.EmitStmt(Stmt); 1482 }; 1483 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), 1484 llvm::None, llvm::None, llvm::None, 1485 llvm::None); 1486 // Emit barrier for firstprivates, lastprivates or reductions only if 1487 // 'sections' directive has 'nowait' clause. Otherwise the barrier will be 1488 // generated by the codegen for the directive. 1489 if ((HasFirstprivates || HasLastprivates || HasReductions) && 1490 S.getSingleClause<OMPNowaitClause>()) { 1491 // Emit implicit barrier to synchronize threads and avoid data races on 1492 // initialization of firstprivate variables. 1493 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown); 1494 } 1495 return OMPD_single; 1496 } 1497 1498 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 1499 LexicalScope Scope(*this, S.getSourceRange()); 1500 OpenMPDirectiveKind EmittedAs = EmitSections(S); 1501 // Emit an implicit barrier at the end. 1502 if (!S.getSingleClause<OMPNowaitClause>()) { 1503 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs); 1504 } 1505 } 1506 1507 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 1508 LexicalScope Scope(*this, S.getSourceRange()); 1509 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1510 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1511 CGF.EnsureInsertPoint(); 1512 }; 1513 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen); 1514 } 1515 1516 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 1517 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 1518 llvm::SmallVector<const Expr *, 8> DestExprs; 1519 llvm::SmallVector<const Expr *, 8> SrcExprs; 1520 llvm::SmallVector<const Expr *, 8> AssignmentOps; 1521 // Check if there are any 'copyprivate' clauses associated with this 1522 // 'single' 1523 // construct. 
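// Illustrative example (assumption, not emitted code): for
//   #pragma omp single copyprivate(a)
//   { a = init(); }
// the thread that executes the region broadcasts its value of 'a' to the
// other threads; the <destination> = <source> helper expressions collected
// below implement that copy when the runtime invokes the copy function
// (via __kmpc_copyprivate in the libomp-based runtime).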
1524 // Build a list of copyprivate variables along with helper expressions 1525 // (<source>, <destination>, <destination>=<source> expressions) 1526 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) { 1527 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 1528 DestExprs.append(C->destination_exprs().begin(), 1529 C->destination_exprs().end()); 1530 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 1531 AssignmentOps.append(C->assignment_ops().begin(), 1532 C->assignment_ops().end()); 1533 } 1534 LexicalScope Scope(*this, S.getSourceRange()); 1535 // Emit code for 'single' region along with 'copyprivate' clauses 1536 bool HasFirstprivates; 1537 auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) { 1538 CodeGenFunction::OMPPrivateScope SingleScope(CGF); 1539 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); 1540 CGF.EmitOMPPrivateClause(S, SingleScope); 1541 (void)SingleScope.Privatize(); 1542 1543 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1544 CGF.EnsureInsertPoint(); 1545 }; 1546 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), 1547 CopyprivateVars, DestExprs, SrcExprs, 1548 AssignmentOps); 1549 // Emit an implicit barrier at the end (to avoid data race on firstprivate 1550 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 1551 if ((!S.getSingleClause<OMPNowaitClause>() || HasFirstprivates) && 1552 CopyprivateVars.empty()) { 1553 CGM.getOpenMPRuntime().emitBarrierCall( 1554 *this, S.getLocStart(), 1555 S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single); 1556 } 1557 } 1558 1559 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 1560 LexicalScope Scope(*this, S.getSourceRange()); 1561 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1562 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1563 CGF.EnsureInsertPoint(); 1564 }; 1565 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart()); 1566 } 1567 1568 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 1569 LexicalScope Scope(*this, S.getSourceRange()); 1570 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1571 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1572 CGF.EnsureInsertPoint(); 1573 }; 1574 CGM.getOpenMPRuntime().emitCriticalRegion( 1575 *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart()); 1576 } 1577 1578 void CodeGenFunction::EmitOMPParallelForDirective( 1579 const OMPParallelForDirective &S) { 1580 // Emit directive as a combined directive that consists of two implicit 1581 // directives: 'parallel' with 'for' directive. 1582 LexicalScope Scope(*this, S.getSourceRange()); 1583 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true); 1584 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1585 CGF.EmitOMPWorksharingLoop(S); 1586 // Emit implicit barrier at the end of parallel region, but this barrier 1587 // is at the end of 'for' directive, so emit it as the implicit barrier for 1588 // this 'for' directive. 1589 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), 1590 OMPD_parallel); 1591 }; 1592 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen); 1593 } 1594 1595 void CodeGenFunction::EmitOMPParallelForSimdDirective( 1596 const OMPParallelForSimdDirective &S) { 1597 // Emit directive as a combined directive that consists of two implicit 1598 // directives: 'parallel' with 'for' directive. 
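// Conceptual sketch (illustrative, not the emitted IR): for
//   #pragma omp parallel for simd
//   for (int i = 0; i < n; ++i) body(i);
// the loop is outlined into a parallel region whose body runs the 'for'
// worksharing loop (with simd loop metadata), and the implicit barrier is
// emitted inside that region on behalf of the 'for' directive.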
1599 LexicalScope Scope(*this, S.getSourceRange()); 1600 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true); 1601 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1602 CGF.EmitOMPWorksharingLoop(S); 1603 // Emit implicit barrier at the end of parallel region, but this barrier 1604 // is at the end of 'for' directive, so emit it as the implicit barrier for 1605 // this 'for' directive. 1606 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), 1607 OMPD_parallel); 1608 }; 1609 emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen); 1610 } 1611 1612 void CodeGenFunction::EmitOMPParallelSectionsDirective( 1613 const OMPParallelSectionsDirective &S) { 1614 // Emit directive as a combined directive that consists of two implicit 1615 // directives: 'parallel' with 'sections' directive. 1616 LexicalScope Scope(*this, S.getSourceRange()); 1617 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1618 (void)CGF.EmitSections(S); 1619 // Emit implicit barrier at the end of parallel region. 1620 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), 1621 OMPD_parallel); 1622 }; 1623 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen); 1624 } 1625 1626 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { 1627 // Emit outlined function for task construct. 1628 LexicalScope Scope(*this, S.getSourceRange()); 1629 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 1630 auto CapturedStruct = GenerateCapturedStmtArgument(*CS); 1631 auto *I = CS->getCapturedDecl()->param_begin(); 1632 auto *PartId = std::next(I); 1633 // The first function argument for tasks is a thread id, the second one is a 1634 // part id (0 for tied tasks, >=0 for untied task). 1635 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 1636 // Get list of private variables. 1637 llvm::SmallVector<const Expr *, 8> PrivateVars; 1638 llvm::SmallVector<const Expr *, 8> PrivateCopies; 1639 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) { 1640 auto IRef = C->varlist_begin(); 1641 for (auto *IInit : C->private_copies()) { 1642 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1643 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 1644 PrivateVars.push_back(*IRef); 1645 PrivateCopies.push_back(IInit); 1646 } 1647 ++IRef; 1648 } 1649 } 1650 EmittedAsPrivate.clear(); 1651 // Get list of firstprivate variables. 1652 llvm::SmallVector<const Expr *, 8> FirstprivateVars; 1653 llvm::SmallVector<const Expr *, 8> FirstprivateCopies; 1654 llvm::SmallVector<const Expr *, 8> FirstprivateInits; 1655 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) { 1656 auto IRef = C->varlist_begin(); 1657 auto IElemInitRef = C->inits().begin(); 1658 for (auto *IInit : C->private_copies()) { 1659 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 1660 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 1661 FirstprivateVars.push_back(*IRef); 1662 FirstprivateCopies.push_back(IInit); 1663 FirstprivateInits.push_back(*IElemInitRef); 1664 } 1665 ++IRef, ++IElemInitRef; 1666 } 1667 } 1668 // Build list of dependences. 
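// For example (illustrative): 'depend(in: a, b) depend(out: c)' is collected
// as the pairs (in, a), (in, b) and (out, c), which are later passed to the
// runtime call that creates the task with dependences.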
1669 llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8> 1670 Dependences; 1671 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) { 1672 for (auto *IRef : C->varlists()) { 1673 Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef)); 1674 } 1675 } 1676 auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars]( 1677 CodeGenFunction &CGF) { 1678 // Set proper addresses for generated private copies. 1679 auto *CS = cast<CapturedStmt>(S.getAssociatedStmt()); 1680 OMPPrivateScope Scope(CGF); 1681 if (!PrivateVars.empty() || !FirstprivateVars.empty()) { 1682 auto *CopyFn = CGF.Builder.CreateLoad( 1683 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3))); 1684 auto *PrivatesPtr = CGF.Builder.CreateLoad( 1685 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2))); 1686 // Map privates. 1687 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> 1688 PrivatePtrs; 1689 llvm::SmallVector<llvm::Value *, 16> CallArgs; 1690 CallArgs.push_back(PrivatesPtr); 1691 for (auto *E : PrivateVars) { 1692 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1693 Address PrivatePtr = 1694 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType())); 1695 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 1696 CallArgs.push_back(PrivatePtr.getPointer()); 1697 } 1698 for (auto *E : FirstprivateVars) { 1699 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 1700 Address PrivatePtr = 1701 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType())); 1702 PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); 1703 CallArgs.push_back(PrivatePtr.getPointer()); 1704 } 1705 CGF.EmitRuntimeCall(CopyFn, CallArgs); 1706 for (auto &&Pair : PrivatePtrs) { 1707 Address Replacement(CGF.Builder.CreateLoad(Pair.second), 1708 CGF.getContext().getDeclAlign(Pair.first)); 1709 Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); 1710 } 1711 } 1712 (void)Scope.Privatize(); 1713 if (*PartId) { 1714 // TODO: emit code for untied tasks. 1715 } 1716 CGF.EmitStmt(CS->getCapturedStmt()); 1717 }; 1718 auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction( 1719 S, *I, OMPD_task, CodeGen); 1720 // Check if we should emit tied or untied task. 1721 bool Tied = !S.getSingleClause<OMPUntiedClause>(); 1722 // Check if the task is final 1723 llvm::PointerIntPair<llvm::Value *, 1, bool> Final; 1724 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) { 1725 // If the condition constant folds and can be elided, try to avoid emitting 1726 // the condition and the dead arm of the if/else. 1727 auto *Cond = Clause->getCondition(); 1728 bool CondConstant; 1729 if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) 1730 Final.setInt(CondConstant); 1731 else 1732 Final.setPointer(EvaluateExprAsBool(Cond)); 1733 } else { 1734 // By default the task is not final. 
1735 Final.setInt(/*IntVal=*/false); 1736 } 1737 auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); 1738 const Expr *IfCond = nullptr; 1739 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) { 1740 if (C->getNameModifier() == OMPD_unknown || 1741 C->getNameModifier() == OMPD_task) { 1742 IfCond = C->getCondition(); 1743 break; 1744 } 1745 } 1746 CGM.getOpenMPRuntime().emitTaskCall( 1747 *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy, 1748 CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars, 1749 FirstprivateCopies, FirstprivateInits, Dependences); 1750 } 1751 1752 void CodeGenFunction::EmitOMPTaskyieldDirective( 1753 const OMPTaskyieldDirective &S) { 1754 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart()); 1755 } 1756 1757 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { 1758 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier); 1759 } 1760 1761 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { 1762 CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart()); 1763 } 1764 1765 void CodeGenFunction::EmitOMPTaskgroupDirective( 1766 const OMPTaskgroupDirective &S) { 1767 LexicalScope Scope(*this, S.getSourceRange()); 1768 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1769 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1770 CGF.EnsureInsertPoint(); 1771 }; 1772 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart()); 1773 } 1774 1775 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { 1776 CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> { 1777 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>()) { 1778 return llvm::makeArrayRef(FlushClause->varlist_begin(), 1779 FlushClause->varlist_end()); 1780 } 1781 return llvm::None; 1782 }(), S.getLocStart()); 1783 } 1784 1785 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { 1786 LexicalScope Scope(*this, S.getSourceRange()); 1787 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1788 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1789 CGF.EnsureInsertPoint(); 1790 }; 1791 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart()); 1792 } 1793 1794 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, 1795 QualType SrcType, QualType DestType, 1796 SourceLocation Loc) { 1797 assert(CGF.hasScalarEvaluationKind(DestType) && 1798 "DestType must have scalar evaluation kind."); 1799 assert(!Val.isAggregate() && "Must be a scalar or complex."); 1800 return Val.isScalar() 1801 ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType, 1802 Loc) 1803 : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType, 1804 DestType, Loc); 1805 } 1806 1807 static CodeGenFunction::ComplexPairTy 1808 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, 1809 QualType DestType, SourceLocation Loc) { 1810 assert(CGF.getEvaluationKind(DestType) == TEK_Complex && 1811 "DestType must have complex evaluation kind."); 1812 CodeGenFunction::ComplexPairTy ComplexVal; 1813 if (Val.isScalar()) { 1814 // Convert the input element to the element type of the complex. 
1815 auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); 1816 auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, 1817 DestElementType, Loc); 1818 ComplexVal = CodeGenFunction::ComplexPairTy( 1819 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); 1820 } else { 1821 assert(Val.isComplex() && "Must be a scalar or complex."); 1822 auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); 1823 auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); 1824 ComplexVal.first = CGF.EmitScalarConversion( 1825 Val.getComplexVal().first, SrcElementType, DestElementType, Loc); 1826 ComplexVal.second = CGF.EmitScalarConversion( 1827 Val.getComplexVal().second, SrcElementType, DestElementType, Loc); 1828 } 1829 return ComplexVal; 1830 } 1831 1832 static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst, 1833 LValue LVal, RValue RVal) { 1834 if (LVal.isGlobalReg()) { 1835 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); 1836 } else { 1837 CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent 1838 : llvm::Monotonic, 1839 LVal.isVolatile(), /*IsInit=*/false); 1840 } 1841 } 1842 1843 static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal, 1844 QualType RValTy, SourceLocation Loc) { 1845 switch (CGF.getEvaluationKind(LVal.getType())) { 1846 case TEK_Scalar: 1847 CGF.EmitStoreThroughLValue(RValue::get(convertToScalarValue( 1848 CGF, RVal, RValTy, LVal.getType(), Loc)), 1849 LVal); 1850 break; 1851 case TEK_Complex: 1852 CGF.EmitStoreOfComplex( 1853 convertToComplexValue(CGF, RVal, RValTy, LVal.getType(), Loc), LVal, 1854 /*isInit=*/false); 1855 break; 1856 case TEK_Aggregate: 1857 llvm_unreachable("Must be a scalar or complex."); 1858 } 1859 } 1860 1861 static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst, 1862 const Expr *X, const Expr *V, 1863 SourceLocation Loc) { 1864 // v = x; 1865 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); 1866 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); 1867 LValue XLValue = CGF.EmitLValue(X); 1868 LValue VLValue = CGF.EmitLValue(V); 1869 RValue Res = XLValue.isGlobalReg() 1870 ? CGF.EmitLoadOfLValue(XLValue, Loc) 1871 : CGF.EmitAtomicLoad(XLValue, Loc, 1872 IsSeqCst ? llvm::SequentiallyConsistent 1873 : llvm::Monotonic, 1874 XLValue.isVolatile()); 1875 // OpenMP, 2.12.6, atomic Construct 1876 // Any atomic construct with a seq_cst clause forces the atomically 1877 // performed operation to include an implicit flush operation without a 1878 // list. 1879 if (IsSeqCst) 1880 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 1881 emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType(), Loc); 1882 } 1883 1884 static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst, 1885 const Expr *X, const Expr *E, 1886 SourceLocation Loc) { 1887 // x = expr; 1888 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); 1889 emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); 1890 // OpenMP, 2.12.6, atomic Construct 1891 // Any atomic construct with a seq_cst clause forces the atomically 1892 // performed operation to include an implicit flush operation without a 1893 // list. 
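// Illustrative example (assumption about the exact calls): for
//   #pragma omp atomic write seq_cst
//   x = expr;
// the store above is emitted as a sequentially consistent atomic store and
// the flush below lowers to the runtime flush entry point (__kmpc_flush in
// the libomp-based runtime).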
1894 if (IsSeqCst) 1895 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 1896 } 1897 1898 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, 1899 RValue Update, 1900 BinaryOperatorKind BO, 1901 llvm::AtomicOrdering AO, 1902 bool IsXLHSInRHSPart) { 1903 auto &Context = CGF.CGM.getContext(); 1904 // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' 1905 // expression is simple and atomic is allowed for the given type for the 1906 // target platform. 1907 if (BO == BO_Comma || !Update.isScalar() || 1908 !Update.getScalarVal()->getType()->isIntegerTy() || 1909 !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) && 1910 (Update.getScalarVal()->getType() != 1911 X.getAddress().getElementType())) || 1912 !X.getAddress().getElementType()->isIntegerTy() || 1913 !Context.getTargetInfo().hasBuiltinAtomic( 1914 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) 1915 return std::make_pair(false, RValue::get(nullptr)); 1916 1917 llvm::AtomicRMWInst::BinOp RMWOp; 1918 switch (BO) { 1919 case BO_Add: 1920 RMWOp = llvm::AtomicRMWInst::Add; 1921 break; 1922 case BO_Sub: 1923 if (!IsXLHSInRHSPart) 1924 return std::make_pair(false, RValue::get(nullptr)); 1925 RMWOp = llvm::AtomicRMWInst::Sub; 1926 break; 1927 case BO_And: 1928 RMWOp = llvm::AtomicRMWInst::And; 1929 break; 1930 case BO_Or: 1931 RMWOp = llvm::AtomicRMWInst::Or; 1932 break; 1933 case BO_Xor: 1934 RMWOp = llvm::AtomicRMWInst::Xor; 1935 break; 1936 case BO_LT: 1937 RMWOp = X.getType()->hasSignedIntegerRepresentation() 1938 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min 1939 : llvm::AtomicRMWInst::Max) 1940 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin 1941 : llvm::AtomicRMWInst::UMax); 1942 break; 1943 case BO_GT: 1944 RMWOp = X.getType()->hasSignedIntegerRepresentation() 1945 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max 1946 : llvm::AtomicRMWInst::Min) 1947 : (IsXLHSInRHSPart ? 
llvm::AtomicRMWInst::UMax 1948 : llvm::AtomicRMWInst::UMin); 1949 break; 1950 case BO_Assign: 1951 RMWOp = llvm::AtomicRMWInst::Xchg; 1952 break; 1953 case BO_Mul: 1954 case BO_Div: 1955 case BO_Rem: 1956 case BO_Shl: 1957 case BO_Shr: 1958 case BO_LAnd: 1959 case BO_LOr: 1960 return std::make_pair(false, RValue::get(nullptr)); 1961 case BO_PtrMemD: 1962 case BO_PtrMemI: 1963 case BO_LE: 1964 case BO_GE: 1965 case BO_EQ: 1966 case BO_NE: 1967 case BO_AddAssign: 1968 case BO_SubAssign: 1969 case BO_AndAssign: 1970 case BO_OrAssign: 1971 case BO_XorAssign: 1972 case BO_MulAssign: 1973 case BO_DivAssign: 1974 case BO_RemAssign: 1975 case BO_ShlAssign: 1976 case BO_ShrAssign: 1977 case BO_Comma: 1978 llvm_unreachable("Unsupported atomic update operation"); 1979 } 1980 auto *UpdateVal = Update.getScalarVal(); 1981 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) { 1982 UpdateVal = CGF.Builder.CreateIntCast( 1983 IC, X.getAddress().getElementType(), 1984 X.getType()->hasSignedIntegerRepresentation()); 1985 } 1986 auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO); 1987 return std::make_pair(true, RValue::get(Res)); 1988 } 1989 1990 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr( 1991 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart, 1992 llvm::AtomicOrdering AO, SourceLocation Loc, 1993 const llvm::function_ref<RValue(RValue)> &CommonGen) { 1994 // Update expressions are allowed to have the following forms: 1995 // x binop= expr; -> xrval + expr; 1996 // x++, ++x -> xrval + 1; 1997 // x--, --x -> xrval - 1; 1998 // x = x binop expr; -> xrval binop expr 1999 // x = expr Op x; - > expr binop xrval; 2000 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart); 2001 if (!Res.first) { 2002 if (X.isGlobalReg()) { 2003 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop 2004 // 'xrval'. 2005 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X); 2006 } else { 2007 // Perform compare-and-swap procedure. 2008 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified()); 2009 } 2010 } 2011 return Res; 2012 } 2013 2014 static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst, 2015 const Expr *X, const Expr *E, 2016 const Expr *UE, bool IsXLHSInRHSPart, 2017 SourceLocation Loc) { 2018 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 2019 "Update expr in 'atomic update' must be a binary operator."); 2020 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 2021 // Update expressions are allowed to have the following forms: 2022 // x binop= expr; -> xrval + expr; 2023 // x++, ++x -> xrval + 1; 2024 // x--, --x -> xrval - 1; 2025 // x = x binop expr; -> xrval binop expr 2026 // x = expr Op x; - > expr binop xrval; 2027 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue"); 2028 LValue XLValue = CGF.EmitLValue(X); 2029 RValue ExprRValue = CGF.EmitAnyExpr(E); 2030 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic; 2031 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 2032 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 2033 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 2034 auto *ERValExpr = IsXLHSInRHSPart ? 
RHS : LHS; 2035 auto Gen = 2036 [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue { 2037 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 2038 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 2039 return CGF.EmitAnyExpr(UE); 2040 }; 2041 (void)CGF.EmitOMPAtomicSimpleUpdateExpr( 2042 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 2043 // OpenMP, 2.12.6, atomic Construct 2044 // Any atomic construct with a seq_cst clause forces the atomically 2045 // performed operation to include an implicit flush operation without a 2046 // list. 2047 if (IsSeqCst) 2048 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 2049 } 2050 2051 static RValue convertToType(CodeGenFunction &CGF, RValue Value, 2052 QualType SourceType, QualType ResType, 2053 SourceLocation Loc) { 2054 switch (CGF.getEvaluationKind(ResType)) { 2055 case TEK_Scalar: 2056 return RValue::get( 2057 convertToScalarValue(CGF, Value, SourceType, ResType, Loc)); 2058 case TEK_Complex: { 2059 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc); 2060 return RValue::getComplex(Res.first, Res.second); 2061 } 2062 case TEK_Aggregate: 2063 break; 2064 } 2065 llvm_unreachable("Must be a scalar or complex."); 2066 } 2067 2068 static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst, 2069 bool IsPostfixUpdate, const Expr *V, 2070 const Expr *X, const Expr *E, 2071 const Expr *UE, bool IsXLHSInRHSPart, 2072 SourceLocation Loc) { 2073 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue"); 2074 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue"); 2075 RValue NewVVal; 2076 LValue VLValue = CGF.EmitLValue(V); 2077 LValue XLValue = CGF.EmitLValue(X); 2078 RValue ExprRValue = CGF.EmitAnyExpr(E); 2079 auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic; 2080 QualType NewVValType; 2081 if (UE) { 2082 // 'x' is updated with some additional value. 2083 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) && 2084 "Update expr in 'atomic capture' must be a binary operator."); 2085 auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts()); 2086 // Update expressions are allowed to have the following forms: 2087 // x binop= expr; -> xrval + expr; 2088 // x++, ++x -> xrval + 1; 2089 // x--, --x -> xrval - 1; 2090 // x = x binop expr; -> xrval binop expr 2091 // x = expr Op x; - > expr binop xrval; 2092 auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts()); 2093 auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts()); 2094 auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS; 2095 NewVValType = XRValExpr->getType(); 2096 auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS; 2097 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr, 2098 IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue { 2099 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 2100 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue); 2101 RValue Res = CGF.EmitAnyExpr(UE); 2102 NewVVal = IsPostfixUpdate ? XRValue : Res; 2103 return Res; 2104 }; 2105 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 2106 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen); 2107 if (Res.first) { 2108 // 'atomicrmw' instruction was generated. 2109 if (IsPostfixUpdate) { 2110 // Use old value from 'atomicrmw'. 2111 NewVVal = Res.second; 2112 } else { 2113 // 'atomicrmw' does not provide new value, so evaluate it using old 2114 // value of 'x'. 
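// For example (illustrative): for 'v = x += expr' the 'atomicrmw add'
// returns the old value of 'x', so the value to store into 'v' is
// recomputed as 'old_x + expr' by re-evaluating the update expression with
// the old value mapped in below.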
2115 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue); 2116 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second); 2117 NewVVal = CGF.EmitAnyExpr(UE); 2118 } 2119 } 2120 } else { 2121 // 'x' is simply rewritten with some 'expr'. 2122 NewVValType = X->getType().getNonReferenceType(); 2123 ExprRValue = convertToType(CGF, ExprRValue, E->getType(), 2124 X->getType().getNonReferenceType(), Loc); 2125 auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue { 2126 NewVVal = XRValue; 2127 return ExprRValue; 2128 }; 2129 // Try to perform atomicrmw xchg, otherwise simple exchange. 2130 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( 2131 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, 2132 Loc, Gen); 2133 if (Res.first) { 2134 // 'atomicrmw' instruction was generated. 2135 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; 2136 } 2137 } 2138 // Emit post-update store to 'v' of old/new 'x' value. 2139 emitSimpleStore(CGF, VLValue, NewVVal, NewVValType, Loc); 2140 // OpenMP, 2.12.6, atomic Construct 2141 // Any atomic construct with a seq_cst clause forces the atomically 2142 // performed operation to include an implicit flush operation without a 2143 // list. 2144 if (IsSeqCst) 2145 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); 2146 } 2147 2148 static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, 2149 bool IsSeqCst, bool IsPostfixUpdate, 2150 const Expr *X, const Expr *V, const Expr *E, 2151 const Expr *UE, bool IsXLHSInRHSPart, 2152 SourceLocation Loc) { 2153 switch (Kind) { 2154 case OMPC_read: 2155 EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc); 2156 break; 2157 case OMPC_write: 2158 EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc); 2159 break; 2160 case OMPC_unknown: 2161 case OMPC_update: 2162 EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc); 2163 break; 2164 case OMPC_capture: 2165 EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE, 2166 IsXLHSInRHSPart, Loc); 2167 break; 2168 case OMPC_if: 2169 case OMPC_final: 2170 case OMPC_num_threads: 2171 case OMPC_private: 2172 case OMPC_firstprivate: 2173 case OMPC_lastprivate: 2174 case OMPC_reduction: 2175 case OMPC_safelen: 2176 case OMPC_simdlen: 2177 case OMPC_collapse: 2178 case OMPC_default: 2179 case OMPC_seq_cst: 2180 case OMPC_shared: 2181 case OMPC_linear: 2182 case OMPC_aligned: 2183 case OMPC_copyin: 2184 case OMPC_copyprivate: 2185 case OMPC_flush: 2186 case OMPC_proc_bind: 2187 case OMPC_schedule: 2188 case OMPC_ordered: 2189 case OMPC_nowait: 2190 case OMPC_untied: 2191 case OMPC_threadprivate: 2192 case OMPC_depend: 2193 case OMPC_mergeable: 2194 case OMPC_device: 2195 llvm_unreachable("Clause is not allowed in 'omp atomic'."); 2196 } 2197 } 2198 2199 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { 2200 bool IsSeqCst = S.getSingleClause<OMPSeqCstClause>(); 2201 OpenMPClauseKind Kind = OMPC_unknown; 2202 for (auto *C : S.clauses()) { 2203 // Find first clause (skip seq_cst clause, if it is first). 2204 if (C->getClauseKind() != OMPC_seq_cst) { 2205 Kind = C->getClauseKind(); 2206 break; 2207 } 2208 } 2209 2210 const auto *CS = 2211 S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); 2212 if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) { 2213 enterFullExpression(EWC); 2214 } 2215 // Processing for statements under 'atomic capture'. 
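// A capture construct may use the compound form, e.g. (illustrative):
//   { v = x; x = x + expr; }
// so enter the cleanups of each sub-statement before emitting the region.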
2216 if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) { 2217 for (const auto *C : Compound->body()) { 2218 if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) { 2219 enterFullExpression(EWC); 2220 } 2221 } 2222 } 2223 2224 LexicalScope Scope(*this, S.getSourceRange()); 2225 auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) { 2226 EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(), 2227 S.getV(), S.getExpr(), S.getUpdateExpr(), 2228 S.isXLHSInRHSPart(), S.getLocStart()); 2229 }; 2230 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen); 2231 } 2232 2233 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) { 2234 llvm_unreachable("CodeGen for 'omp target' is not supported yet."); 2235 } 2236 2237 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) { 2238 llvm_unreachable("CodeGen for 'omp teams' is not supported yet."); 2239 } 2240 2241 void CodeGenFunction::EmitOMPCancellationPointDirective( 2242 const OMPCancellationPointDirective &S) { 2243 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(), 2244 S.getCancelRegion()); 2245 } 2246 2247 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) { 2248 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), 2249 S.getCancelRegion()); 2250 } 2251 2252 CodeGenFunction::JumpDest 2253 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) { 2254 if (Kind == OMPD_parallel || Kind == OMPD_task) 2255 return ReturnBlock; 2256 else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections) 2257 return BreakContinueStack.empty() ? JumpDest() 2258 : BreakContinueStack.back().BreakBlock; 2259 return JumpDest(); 2260 } 2261 2262 // Generate the instructions for '#pragma omp target data' directive. 2263 void CodeGenFunction::EmitOMPTargetDataDirective( 2264 const OMPTargetDataDirective &S) { 2265 2266 // emit the code inside the construct for now 2267 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 2268 CGM.getOpenMPRuntime().emitInlinedDirective( 2269 *this, OMPD_target_data, 2270 [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); }); 2271 } 2272
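// Note (illustrative, matching the placeholder lowering above): a directive
// such as '#pragma omp target data map(tofrom: a)' currently just emits its
// associated statement inline; no data-mapping runtime calls are generated
// yet.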