1 //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code to emit OpenMP nodes as LLVM code. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGOpenMPRuntime.h" 15 #include "CodeGenFunction.h" 16 #include "CodeGenModule.h" 17 #include "TargetInfo.h" 18 #include "clang/AST/Stmt.h" 19 #include "clang/AST/StmtOpenMP.h" 20 using namespace clang; 21 using namespace CodeGen; 22 23 //===----------------------------------------------------------------------===// 24 // OpenMP Directive Emission 25 //===----------------------------------------------------------------------===// 26 void CodeGenFunction::EmitOMPAggregateAssign( 27 llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType, 28 const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) { 29 // Perform element-by-element initialization. 30 QualType ElementTy; 31 auto SrcBegin = SrcAddr; 32 auto DestBegin = DestAddr; 33 auto ArrayTy = OriginalType->getAsArrayTypeUnsafe(); 34 auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin); 35 // Cast from pointer to array type to pointer to single element. 36 SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin, 37 DestBegin->getType()); 38 auto DestEnd = Builder.CreateGEP(DestBegin, NumElements); 39 // The basic structure here is a while-do loop. 40 auto BodyBB = createBasicBlock("omp.arraycpy.body"); 41 auto DoneBB = createBasicBlock("omp.arraycpy.done"); 42 auto IsEmpty = 43 Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty"); 44 Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); 45 46 // Enter the loop body, making that address the current address. 47 auto EntryBB = Builder.GetInsertBlock(); 48 EmitBlock(BodyBB); 49 auto SrcElementCurrent = 50 Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast"); 51 SrcElementCurrent->addIncoming(SrcBegin, EntryBB); 52 auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2, 53 "omp.arraycpy.destElementPast"); 54 DestElementCurrent->addIncoming(DestBegin, EntryBB); 55 56 // Emit copy. 57 CopyGen(DestElementCurrent, SrcElementCurrent); 58 59 // Shift the address forward by one element. 60 auto DestElementNext = Builder.CreateConstGEP1_32( 61 DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element"); 62 auto SrcElementNext = Builder.CreateConstGEP1_32( 63 SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element"); 64 // Check whether we've reached the end. 65 auto Done = 66 Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done"); 67 Builder.CreateCondBr(Done, DoneBB, BodyBB); 68 DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock()); 69 SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock()); 70 71 // Done. 72 EmitBlock(DoneBB, /*IsFinished=*/true); 73 } 74 75 void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF, 76 QualType OriginalType, llvm::Value *DestAddr, 77 llvm::Value *SrcAddr, const VarDecl *DestVD, 78 const VarDecl *SrcVD, const Expr *Copy) { 79 if (OriginalType->isArrayType()) { 80 auto *BO = dyn_cast<BinaryOperator>(Copy); 81 if (BO && BO->getOpcode() == BO_Assign) { 82 // Perform simple memcpy for simple copying. 
83 CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType); 84 } else { 85 // For arrays with complex element types perform element by element 86 // copying. 87 CGF.EmitOMPAggregateAssign( 88 DestAddr, SrcAddr, OriginalType, 89 [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement, 90 llvm::Value *SrcElement) { 91 // Working with the single array element, so have to remap 92 // destination and source variables to corresponding array 93 // elements. 94 CodeGenFunction::OMPPrivateScope Remap(CGF); 95 Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{ 96 return DestElement; 97 }); 98 Remap.addPrivate( 99 SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; }); 100 (void)Remap.Privatize(); 101 CGF.EmitIgnoredExpr(Copy); 102 }); 103 } 104 } else { 105 // Remap pseudo source variable to private copy. 106 CodeGenFunction::OMPPrivateScope Remap(CGF); 107 Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; }); 108 Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; }); 109 (void)Remap.Privatize(); 110 // Emit copying of the whole variable. 111 CGF.EmitIgnoredExpr(Copy); 112 } 113 } 114 115 bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D, 116 OMPPrivateScope &PrivateScope) { 117 llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate; 118 for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) { 119 auto *C = cast<OMPFirstprivateClause>(*I); 120 auto IRef = C->varlist_begin(); 121 auto InitsRef = C->inits().begin(); 122 for (auto IInit : C->private_copies()) { 123 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 124 if (EmittedAsFirstprivate.count(OrigVD) == 0) { 125 EmittedAsFirstprivate.insert(OrigVD); 126 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 127 auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl()); 128 bool IsRegistered; 129 DeclRefExpr DRE( 130 const_cast<VarDecl *>(OrigVD), 131 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup( 132 OrigVD) != nullptr, 133 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 134 auto *OriginalAddr = EmitLValue(&DRE).getAddress(); 135 QualType Type = OrigVD->getType(); 136 if (Type->isArrayType()) { 137 // Emit VarDecl with copy init for arrays. 138 // Get the address of the original variable captured in current 139 // captured region. 140 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ 141 auto Emission = EmitAutoVarAlloca(*VD); 142 auto *Init = VD->getInit(); 143 if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) { 144 // Perform simple memcpy. 145 EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr, 146 Type); 147 } else { 148 EmitOMPAggregateAssign( 149 Emission.getAllocatedAddress(), OriginalAddr, Type, 150 [this, VDInit, Init](llvm::Value *DestElement, 151 llvm::Value *SrcElement) { 152 // Clean up any temporaries needed by the initialization. 153 RunCleanupsScope InitScope(*this); 154 // Emit initialization for single element. 155 LocalDeclMap[VDInit] = SrcElement; 156 EmitAnyExprToMem(Init, DestElement, 157 Init->getType().getQualifiers(), 158 /*IsInitializer*/ false); 159 LocalDeclMap.erase(VDInit); 160 }); 161 } 162 EmitAutoVarCleanups(Emission); 163 return Emission.getAllocatedAddress(); 164 }); 165 } else { 166 IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ 167 // Emit private VarDecl with copy init. 
168 // Remap temp VDInit variable to the address of the original 169 // variable 170 // (for proper handling of captured global variables). 171 LocalDeclMap[VDInit] = OriginalAddr; 172 EmitDecl(*VD); 173 LocalDeclMap.erase(VDInit); 174 return GetAddrOfLocalVar(VD); 175 }); 176 } 177 assert(IsRegistered && 178 "firstprivate var already registered as private"); 179 // Silence the warning about unused variable. 180 (void)IsRegistered; 181 } 182 ++IRef, ++InitsRef; 183 } 184 } 185 return !EmittedAsFirstprivate.empty(); 186 } 187 188 void CodeGenFunction::EmitOMPPrivateClause( 189 const OMPExecutableDirective &D, 190 CodeGenFunction::OMPPrivateScope &PrivateScope) { 191 llvm::DenseSet<const VarDecl *> EmittedAsPrivate; 192 for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) { 193 auto *C = cast<OMPPrivateClause>(*I); 194 auto IRef = C->varlist_begin(); 195 for (auto IInit : C->private_copies()) { 196 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 197 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { 198 auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 199 bool IsRegistered = 200 PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ 201 // Emit private VarDecl with copy init. 202 EmitDecl(*VD); 203 return GetAddrOfLocalVar(VD); 204 }); 205 assert(IsRegistered && "private var already registered as private"); 206 // Silence the warning about unused variable. 207 (void)IsRegistered; 208 } 209 ++IRef; 210 } 211 } 212 } 213 214 bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) { 215 // threadprivate_var1 = master_threadprivate_var1; 216 // operator=(threadprivate_var2, master_threadprivate_var2); 217 // ... 218 // __kmpc_barrier(&loc, global_tid); 219 llvm::DenseSet<const VarDecl *> CopiedVars; 220 llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr; 221 for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) { 222 auto *C = cast<OMPCopyinClause>(*I); 223 auto IRef = C->varlist_begin(); 224 auto ISrcRef = C->source_exprs().begin(); 225 auto IDestRef = C->destination_exprs().begin(); 226 for (auto *AssignOp : C->assignment_ops()) { 227 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 228 QualType Type = VD->getType(); 229 if (CopiedVars.insert(VD->getCanonicalDecl()).second) { 230 231 // Get the address of the master variable. If we are emitting code with 232 // TLS support, the address is passed from the master as field in the 233 // captured declaration. 234 llvm::Value *MasterAddr; 235 if (getLangOpts().OpenMPUseTLS && 236 getContext().getTargetInfo().isTLSSupported()) { 237 assert(CapturedStmtInfo->lookup(VD) && 238 "Copyin threadprivates should have been captured!"); 239 DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(), 240 VK_LValue, (*IRef)->getExprLoc()); 241 MasterAddr = EmitLValue(&DRE).getAddress(); 242 } else { 243 MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD) 244 : CGM.GetAddrOfGlobal(VD); 245 } 246 // Get the address of the threadprivate variable. 247 auto *PrivateAddr = EmitLValue(*IRef).getAddress(); 248 if (CopiedVars.size() == 1) { 249 // At first check if current thread is a master thread. If it is, no 250 // need to copy data. 
251 CopyBegin = createBasicBlock("copyin.not.master"); 252 CopyEnd = createBasicBlock("copyin.not.master.end"); 253 Builder.CreateCondBr( 254 Builder.CreateICmpNE( 255 Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy), 256 Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)), 257 CopyBegin, CopyEnd); 258 EmitBlock(CopyBegin); 259 } 260 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); 261 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 262 EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD, 263 AssignOp); 264 } 265 ++IRef; 266 ++ISrcRef; 267 ++IDestRef; 268 } 269 } 270 if (CopyEnd) { 271 // Exit out of copying procedure for non-master thread. 272 EmitBlock(CopyEnd, /*IsFinished=*/true); 273 return true; 274 } 275 return false; 276 } 277 278 bool CodeGenFunction::EmitOMPLastprivateClauseInit( 279 const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) { 280 bool HasAtLeastOneLastprivate = false; 281 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; 282 for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) { 283 HasAtLeastOneLastprivate = true; 284 auto *C = cast<OMPLastprivateClause>(*I); 285 auto IRef = C->varlist_begin(); 286 auto IDestRef = C->destination_exprs().begin(); 287 for (auto *IInit : C->private_copies()) { 288 // Keep the address of the original variable for future update at the end 289 // of the loop. 290 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 291 if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) { 292 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 293 PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{ 294 DeclRefExpr DRE( 295 const_cast<VarDecl *>(OrigVD), 296 /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup( 297 OrigVD) != nullptr, 298 (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); 299 return EmitLValue(&DRE).getAddress(); 300 }); 301 // Check if the variable is also a firstprivate: in this case IInit is 302 // not generated. Initialization of this variable will happen in codegen 303 // for 'firstprivate' clause. 304 if (IInit) { 305 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); 306 bool IsRegistered = 307 PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ 308 // Emit private VarDecl with copy init. 309 EmitDecl(*VD); 310 return GetAddrOfLocalVar(VD); 311 }); 312 assert(IsRegistered && 313 "lastprivate var already registered as private"); 314 (void)IsRegistered; 315 } 316 } 317 ++IRef, ++IDestRef; 318 } 319 } 320 return HasAtLeastOneLastprivate; 321 } 322 323 void CodeGenFunction::EmitOMPLastprivateClauseFinal( 324 const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) { 325 // Emit following code: 326 // if (<IsLastIterCond>) { 327 // orig_var1 = private_orig_var1; 328 // ... 
329 // orig_varn = private_orig_varn; 330 // } 331 llvm::BasicBlock *ThenBB = nullptr; 332 llvm::BasicBlock *DoneBB = nullptr; 333 if (IsLastIterCond) { 334 ThenBB = createBasicBlock(".omp.lastprivate.then"); 335 DoneBB = createBasicBlock(".omp.lastprivate.done"); 336 Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB); 337 EmitBlock(ThenBB); 338 } 339 llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates; 340 const Expr *LastIterVal = nullptr; 341 const Expr *IVExpr = nullptr; 342 const Expr *IncExpr = nullptr; 343 if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) { 344 if (isOpenMPWorksharingDirective(D.getDirectiveKind())) { 345 LastIterVal = cast<VarDecl>(cast<DeclRefExpr>( 346 LoopDirective->getUpperBoundVariable()) 347 ->getDecl()) 348 ->getAnyInitializer(); 349 IVExpr = LoopDirective->getIterationVariable(); 350 IncExpr = LoopDirective->getInc(); 351 auto IUpdate = LoopDirective->updates().begin(); 352 for (auto *E : LoopDirective->counters()) { 353 auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl(); 354 LoopCountersAndUpdates[D] = *IUpdate; 355 ++IUpdate; 356 } 357 } 358 } 359 { 360 llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; 361 bool FirstLCV = true; 362 for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) { 363 auto *C = cast<OMPLastprivateClause>(*I); 364 auto IRef = C->varlist_begin(); 365 auto ISrcRef = C->source_exprs().begin(); 366 auto IDestRef = C->destination_exprs().begin(); 367 for (auto *AssignOp : C->assignment_ops()) { 368 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); 369 QualType Type = PrivateVD->getType(); 370 auto *CanonicalVD = PrivateVD->getCanonicalDecl(); 371 if (AlreadyEmittedVars.insert(CanonicalVD).second) { 372 // If lastprivate variable is a loop control variable for loop-based 373 // directive, update its value before copyin back to original 374 // variable. 375 if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) { 376 if (FirstLCV && LastIterVal) { 377 EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(), 378 IVExpr->getType().getQualifiers(), 379 /*IsInitializer=*/false); 380 EmitIgnoredExpr(IncExpr); 381 FirstLCV = false; 382 } 383 EmitIgnoredExpr(UpExpr); 384 } 385 auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); 386 auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); 387 // Get the address of the original variable. 388 auto *OriginalAddr = GetAddrOfLocalVar(DestVD); 389 // Get the address of the private variable. 390 auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD); 391 EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, 392 AssignOp); 393 } 394 ++IRef; 395 ++ISrcRef; 396 ++IDestRef; 397 } 398 } 399 } 400 if (IsLastIterCond) { 401 EmitBlock(DoneBB, /*IsFinished=*/true); 402 } 403 } 404 405 void CodeGenFunction::EmitOMPReductionClauseInit( 406 const OMPExecutableDirective &D, 407 CodeGenFunction::OMPPrivateScope &PrivateScope) { 408 for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) { 409 auto *C = cast<OMPReductionClause>(*I); 410 auto ILHS = C->lhs_exprs().begin(); 411 auto IRHS = C->rhs_exprs().begin(); 412 for (auto IRef : C->varlists()) { 413 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl()); 414 auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl()); 415 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl()); 416 // Store the address of the original variable associated with the LHS 417 // implicit variable. 
418 PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{ 419 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), 420 CapturedStmtInfo->lookup(OrigVD) != nullptr, 421 IRef->getType(), VK_LValue, IRef->getExprLoc()); 422 return EmitLValue(&DRE).getAddress(); 423 }); 424 // Emit reduction copy. 425 bool IsRegistered = 426 PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{ 427 // Emit private VarDecl with reduction init. 428 EmitDecl(*PrivateVD); 429 return GetAddrOfLocalVar(PrivateVD); 430 }); 431 assert(IsRegistered && "private var already registered as private"); 432 // Silence the warning about unused variable. 433 (void)IsRegistered; 434 ++ILHS, ++IRHS; 435 } 436 } 437 } 438 439 void CodeGenFunction::EmitOMPReductionClauseFinal( 440 const OMPExecutableDirective &D) { 441 llvm::SmallVector<const Expr *, 8> LHSExprs; 442 llvm::SmallVector<const Expr *, 8> RHSExprs; 443 llvm::SmallVector<const Expr *, 8> ReductionOps; 444 bool HasAtLeastOneReduction = false; 445 for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) { 446 HasAtLeastOneReduction = true; 447 auto *C = cast<OMPReductionClause>(*I); 448 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); 449 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); 450 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); 451 } 452 if (HasAtLeastOneReduction) { 453 // Emit nowait reduction if nowait clause is present or directive is a 454 // parallel directive (it always has implicit barrier). 455 CGM.getOpenMPRuntime().emitReduction( 456 *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps, 457 D.getSingleClause(OMPC_nowait) || 458 isOpenMPParallelDirective(D.getDirectiveKind()) || 459 D.getDirectiveKind() == OMPD_simd, 460 D.getDirectiveKind() == OMPD_simd); 461 } 462 } 463 464 static void emitCommonOMPParallelDirective(CodeGenFunction &CGF, 465 const OMPExecutableDirective &S, 466 OpenMPDirectiveKind InnermostKind, 467 const RegionCodeGenTy &CodeGen) { 468 auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); 469 auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS); 470 auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( 471 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); 472 if (auto C = S.getSingleClause(OMPC_num_threads)) { 473 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 474 auto NumThreadsClause = cast<OMPNumThreadsClause>(C); 475 auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), 476 /*IgnoreResultAssign*/ true); 477 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( 478 CGF, NumThreads, NumThreadsClause->getLocStart()); 479 } 480 if (auto *C = S.getSingleClause(OMPC_proc_bind)) { 481 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); 482 auto *ProcBindClause = cast<OMPProcBindClause>(C); 483 CGF.CGM.getOpenMPRuntime().emitProcBindClause( 484 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart()); 485 } 486 const Expr *IfCond = nullptr; 487 if (auto C = S.getSingleClause(OMPC_if)) { 488 IfCond = cast<OMPIfClause>(C)->getCondition(); 489 } 490 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn, 491 CapturedStruct, IfCond); 492 } 493 494 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { 495 LexicalScope Scope(*this, S.getSourceRange()); 496 // Emit parallel region as a standalone region. 
497 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 498 OMPPrivateScope PrivateScope(CGF); 499 bool Copyins = CGF.EmitOMPCopyinClause(S); 500 bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope); 501 if (Copyins || Firstprivates) { 502 // Emit implicit barrier to synchronize threads and avoid data races on 503 // initialization of firstprivate variables or propagation master's thread 504 // values of threadprivate variables to local instances of that variables 505 // of all other implicit threads. 506 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), 507 OMPD_unknown); 508 } 509 CGF.EmitOMPPrivateClause(S, PrivateScope); 510 CGF.EmitOMPReductionClauseInit(S, PrivateScope); 511 (void)PrivateScope.Privatize(); 512 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 513 CGF.EmitOMPReductionClauseFinal(S); 514 // Emit implicit barrier at the end of the 'parallel' directive. 515 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), 516 OMPD_unknown); 517 }; 518 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen); 519 } 520 521 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D, 522 JumpDest LoopExit) { 523 RunCleanupsScope BodyScope(*this); 524 // Update counters values on current iteration. 525 for (auto I : D.updates()) { 526 EmitIgnoredExpr(I); 527 } 528 // Update the linear variables. 529 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) { 530 auto *C = cast<OMPLinearClause>(*I); 531 for (auto U : C->updates()) { 532 EmitIgnoredExpr(U); 533 } 534 } 535 536 // On a continue in the body, jump to the end. 537 auto Continue = getJumpDestInCurrentScope("omp.body.continue"); 538 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 539 // Emit loop body. 540 EmitStmt(D.getBody()); 541 // The end (updates/cleanups). 542 EmitBlock(Continue.getBlock()); 543 BreakContinueStack.pop_back(); 544 // TODO: Update lastprivates if the SeparateIter flag is true. 545 // This will be implemented in a follow-up OMPLastprivateClause patch, but 546 // result should be still correct without it, as we do not make these 547 // variables private yet. 548 } 549 550 void CodeGenFunction::EmitOMPInnerLoop( 551 const Stmt &S, bool RequiresCleanup, const Expr *LoopCond, 552 const Expr *IncExpr, 553 const llvm::function_ref<void(CodeGenFunction &)> &BodyGen, 554 const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) { 555 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end"); 556 557 // Start the loop with a block that tests the condition. 558 auto CondBlock = createBasicBlock("omp.inner.for.cond"); 559 EmitBlock(CondBlock); 560 LoopStack.push(CondBlock); 561 562 // If there are any cleanups between here and the loop-exit scope, 563 // create a block to stage a loop exit along. 564 auto ExitBlock = LoopExit.getBlock(); 565 if (RequiresCleanup) 566 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup"); 567 568 auto LoopBody = createBasicBlock("omp.inner.for.body"); 569 570 // Emit condition. 571 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S)); 572 if (ExitBlock != LoopExit.getBlock()) { 573 EmitBlock(ExitBlock); 574 EmitBranchThroughCleanup(LoopExit); 575 } 576 577 EmitBlock(LoopBody); 578 incrementProfileCounter(&S); 579 580 // Create a block for the increment. 
581 auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); 582 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 583 584 BodyGen(*this); 585 586 // Emit "IV = IV + 1" and a back-edge to the condition block. 587 EmitBlock(Continue.getBlock()); 588 EmitIgnoredExpr(IncExpr); 589 PostIncGen(*this); 590 BreakContinueStack.pop_back(); 591 EmitBranch(CondBlock); 592 LoopStack.pop(); 593 // Emit the fall-through block. 594 EmitBlock(LoopExit.getBlock()); 595 } 596 597 void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) { 598 // Emit inits for the linear variables. 599 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) { 600 auto *C = cast<OMPLinearClause>(*I); 601 for (auto Init : C->inits()) { 602 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); 603 auto *OrigVD = cast<VarDecl>( 604 cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl()); 605 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), 606 CapturedStmtInfo->lookup(OrigVD) != nullptr, 607 VD->getInit()->getType(), VK_LValue, 608 VD->getInit()->getExprLoc()); 609 AutoVarEmission Emission = EmitAutoVarAlloca(*VD); 610 EmitExprAsInit(&DRE, VD, 611 MakeAddrLValue(Emission.getAllocatedAddress(), 612 VD->getType(), Emission.Alignment), 613 /*capturedByInit=*/false); 614 EmitAutoVarCleanups(Emission); 615 } 616 // Emit the linear steps for the linear clauses. 617 // If a step is not constant, it is pre-calculated before the loop. 618 if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep())) 619 if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) { 620 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); 621 // Emit calculation of the linear step. 622 EmitIgnoredExpr(CS); 623 } 624 } 625 } 626 627 static void emitLinearClauseFinal(CodeGenFunction &CGF, 628 const OMPLoopDirective &D) { 629 // Emit the final values of the linear variables. 630 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) { 631 auto *C = cast<OMPLinearClause>(*I); 632 auto IC = C->varlist_begin(); 633 for (auto F : C->finals()) { 634 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); 635 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), 636 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr, 637 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 638 auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress(); 639 CodeGenFunction::OMPPrivateScope VarScope(CGF); 640 VarScope.addPrivate(OrigVD, 641 [OrigAddr]() -> llvm::Value *{ return OrigAddr; }); 642 (void)VarScope.Privatize(); 643 CGF.EmitIgnoredExpr(F); 644 ++IC; 645 } 646 } 647 } 648 649 static void emitAlignedClause(CodeGenFunction &CGF, 650 const OMPExecutableDirective &D) { 651 for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) { 652 auto *Clause = cast<OMPAlignedClause>(*I); 653 unsigned ClauseAlignment = 0; 654 if (auto AlignmentExpr = Clause->getAlignment()) { 655 auto AlignmentCI = 656 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr)); 657 ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue()); 658 } 659 for (auto E : Clause->varlists()) { 660 unsigned Alignment = ClauseAlignment; 661 if (Alignment == 0) { 662 // OpenMP [2.8.1, Description] 663 // If no optional parameter is specified, implementation-defined default 664 // alignments for SIMD instructions on the target platforms are assumed. 
665 Alignment = 666 CGF.getContext() 667 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 668 E->getType()->getPointeeType())) 669 .getQuantity(); 670 } 671 assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) && 672 "alignment is not power of 2"); 673 if (Alignment != 0) { 674 llvm::Value *PtrValue = CGF.EmitScalarExpr(E); 675 CGF.EmitAlignmentAssumption(PtrValue, Alignment); 676 } 677 } 678 } 679 } 680 681 static void emitPrivateLoopCounters(CodeGenFunction &CGF, 682 CodeGenFunction::OMPPrivateScope &LoopScope, 683 ArrayRef<Expr *> Counters, 684 ArrayRef<Expr *> PrivateCounters) { 685 auto I = PrivateCounters.begin(); 686 for (auto *E : Counters) { 687 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 688 auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()); 689 llvm::Value *Addr; 690 (void)LoopScope.addPrivate(PrivateVD, [&]() -> llvm::Value * { 691 // Emit var without initialization. 692 auto VarEmission = CGF.EmitAutoVarAlloca(*PrivateVD); 693 CGF.EmitAutoVarCleanups(VarEmission); 694 Addr = VarEmission.getAllocatedAddress(); 695 return Addr; 696 }); 697 (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value * { return Addr; }); 698 ++I; 699 } 700 } 701 702 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, 703 const Expr *Cond, llvm::BasicBlock *TrueBlock, 704 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { 705 { 706 CodeGenFunction::OMPPrivateScope PreCondScope(CGF); 707 emitPrivateLoopCounters(CGF, PreCondScope, S.counters(), 708 S.private_counters()); 709 (void)PreCondScope.Privatize(); 710 // Get initial values of real counters. 711 for (auto I : S.inits()) { 712 CGF.EmitIgnoredExpr(I); 713 } 714 } 715 // Check that loop is executed at least one time. 716 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); 717 } 718 719 static void 720 emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D, 721 CodeGenFunction::OMPPrivateScope &PrivateScope) { 722 for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) { 723 auto *C = cast<OMPLinearClause>(*I); 724 for (auto *E : C->varlists()) { 725 auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 726 bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * { 727 // Emit var without initialization. 728 auto VarEmission = CGF.EmitAutoVarAlloca(*VD); 729 CGF.EmitAutoVarCleanups(VarEmission); 730 return VarEmission.getAllocatedAddress(); 731 }); 732 assert(IsRegistered && "linear var already registered as private"); 733 // Silence the warning about unused variable. 734 (void)IsRegistered; 735 } 736 } 737 } 738 739 static void emitSafelenClause(CodeGenFunction &CGF, 740 const OMPExecutableDirective &D) { 741 if (auto *C = 742 cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) { 743 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(), 744 /*ignoreResult=*/true); 745 llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); 746 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue()); 747 // In presence of finite 'safelen', it may be unsafe to mark all 748 // the memory instructions parallel, because loop-carried 749 // dependences of 'safelen' iterations are possible. 750 CGF.LoopStack.setParallel(false); 751 } 752 } 753 754 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) { 755 // Walk clauses and process safelen/lastprivate. 
756 LoopStack.setParallel(); 757 LoopStack.setVectorizeEnable(true); 758 emitSafelenClause(*this, D); 759 } 760 761 void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) { 762 auto IC = D.counters().begin(); 763 for (auto F : D.finals()) { 764 auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); 765 if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) { 766 DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), 767 CapturedStmtInfo->lookup(OrigVD) != nullptr, 768 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); 769 auto *OrigAddr = EmitLValue(&DRE).getAddress(); 770 OMPPrivateScope VarScope(*this); 771 VarScope.addPrivate(OrigVD, 772 [OrigAddr]() -> llvm::Value *{ return OrigAddr; }); 773 (void)VarScope.Privatize(); 774 EmitIgnoredExpr(F); 775 } 776 ++IC; 777 } 778 emitLinearClauseFinal(*this, D); 779 } 780 781 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { 782 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 783 // if (PreCond) { 784 // for (IV in 0..LastIteration) BODY; 785 // <Final counter/linear vars updates>; 786 // } 787 // 788 789 // Emit: if (PreCond) - begin. 790 // If the condition constant folds and can be elided, avoid emitting the 791 // whole loop. 792 bool CondConstant; 793 llvm::BasicBlock *ContBlock = nullptr; 794 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 795 if (!CondConstant) 796 return; 797 } else { 798 auto *ThenBlock = CGF.createBasicBlock("simd.if.then"); 799 ContBlock = CGF.createBasicBlock("simd.if.end"); 800 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, 801 CGF.getProfileCount(&S)); 802 CGF.EmitBlock(ThenBlock); 803 CGF.incrementProfileCounter(&S); 804 } 805 806 // Emit the loop iteration variable. 807 const Expr *IVExpr = S.getIterationVariable(); 808 const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); 809 CGF.EmitVarDecl(*IVDecl); 810 CGF.EmitIgnoredExpr(S.getInit()); 811 812 // Emit the iterations count variable. 813 // If it is not a variable, Sema decided to calculate iterations count on 814 // each iteration (e.g., it is foldable into a constant). 815 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 816 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 817 // Emit calculation of the iterations count. 818 CGF.EmitIgnoredExpr(S.getCalcLastIteration()); 819 } 820 821 CGF.EmitOMPSimdInit(S); 822 823 emitAlignedClause(CGF, S); 824 CGF.EmitOMPLinearClauseInit(S); 825 bool HasLastprivateClause; 826 { 827 OMPPrivateScope LoopScope(CGF); 828 emitPrivateLoopCounters(CGF, LoopScope, S.counters(), 829 S.private_counters()); 830 emitPrivateLinearVars(CGF, S, LoopScope); 831 CGF.EmitOMPPrivateClause(S, LoopScope); 832 CGF.EmitOMPReductionClauseInit(S, LoopScope); 833 HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 834 (void)LoopScope.Privatize(); 835 CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), 836 S.getInc(), 837 [&S](CodeGenFunction &CGF) { 838 CGF.EmitOMPLoopBody(S, JumpDest()); 839 CGF.EmitStopPoint(&S); 840 }, 841 [](CodeGenFunction &) {}); 842 // Emit final copy of the lastprivate variables at the end of loops. 843 if (HasLastprivateClause) { 844 CGF.EmitOMPLastprivateClauseFinal(S); 845 } 846 CGF.EmitOMPReductionClauseFinal(S); 847 } 848 CGF.EmitOMPSimdFinal(S); 849 // Emit: if (PreCond) - end. 
850 if (ContBlock) { 851 CGF.EmitBranch(ContBlock); 852 CGF.EmitBlock(ContBlock, true); 853 } 854 }; 855 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 856 } 857 858 void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind, 859 const OMPLoopDirective &S, 860 OMPPrivateScope &LoopScope, 861 bool Ordered, llvm::Value *LB, 862 llvm::Value *UB, llvm::Value *ST, 863 llvm::Value *IL, llvm::Value *Chunk) { 864 auto &RT = CGM.getOpenMPRuntime(); 865 866 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). 867 const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind); 868 869 assert((Ordered || 870 !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) && 871 "static non-chunked schedule does not need outer loop"); 872 873 // Emit outer loop. 874 // 875 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 876 // When schedule(dynamic,chunk_size) is specified, the iterations are 877 // distributed to threads in the team in chunks as the threads request them. 878 // Each thread executes a chunk of iterations, then requests another chunk, 879 // until no chunks remain to be distributed. Each chunk contains chunk_size 880 // iterations, except for the last chunk to be distributed, which may have 881 // fewer iterations. When no chunk_size is specified, it defaults to 1. 882 // 883 // When schedule(guided,chunk_size) is specified, the iterations are assigned 884 // to threads in the team in chunks as the executing threads request them. 885 // Each thread executes a chunk of iterations, then requests another chunk, 886 // until no chunks remain to be assigned. For a chunk_size of 1, the size of 887 // each chunk is proportional to the number of unassigned iterations divided 888 // by the number of threads in the team, decreasing to 1. For a chunk_size 889 // with value k (greater than 1), the size of each chunk is determined in the 890 // same way, with the restriction that the chunks do not contain fewer than k 891 // iterations (except for the last chunk to be assigned, which may have fewer 892 // than k iterations). 893 // 894 // When schedule(auto) is specified, the decision regarding scheduling is 895 // delegated to the compiler and/or runtime system. The programmer gives the 896 // implementation the freedom to choose any possible mapping of iterations to 897 // threads in the team. 898 // 899 // When schedule(runtime) is specified, the decision regarding scheduling is 900 // deferred until run time, and the schedule and chunk size are taken from the 901 // run-sched-var ICV. If the ICV is set to auto, the schedule is 902 // implementation defined 903 // 904 // while(__kmpc_dispatch_next(&LB, &UB)) { 905 // idx = LB; 906 // while (idx <= UB) { BODY; ++idx; 907 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. 908 // } // inner loop 909 // } 910 // 911 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 912 // When schedule(static, chunk_size) is specified, iterations are divided into 913 // chunks of size chunk_size, and the chunks are assigned to the threads in 914 // the team in a round-robin fashion in the order of the thread number. 
915 // 916 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { 917 // while (idx <= UB) { BODY; ++idx; } // inner loop 918 // LB = LB + ST; 919 // UB = UB + ST; 920 // } 921 // 922 923 const Expr *IVExpr = S.getIterationVariable(); 924 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 925 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 926 927 RT.emitForInit( 928 *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB, 929 (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal() 930 : UB), 931 ST, Chunk); 932 933 auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); 934 935 // Start the loop with a block that tests the condition. 936 auto CondBlock = createBasicBlock("omp.dispatch.cond"); 937 EmitBlock(CondBlock); 938 LoopStack.push(CondBlock); 939 940 llvm::Value *BoolCondVal = nullptr; 941 if (!DynamicOrOrdered) { 942 // UB = min(UB, GlobalUB) 943 EmitIgnoredExpr(S.getEnsureUpperBound()); 944 // IV = LB 945 EmitIgnoredExpr(S.getInit()); 946 // IV < UB 947 BoolCondVal = EvaluateExprAsBool(S.getCond()); 948 } else { 949 BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, 950 IL, LB, UB, ST); 951 } 952 953 // If there are any cleanups between here and the loop-exit scope, 954 // create a block to stage a loop exit along. 955 auto ExitBlock = LoopExit.getBlock(); 956 if (LoopScope.requiresCleanups()) 957 ExitBlock = createBasicBlock("omp.dispatch.cleanup"); 958 959 auto LoopBody = createBasicBlock("omp.dispatch.body"); 960 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); 961 if (ExitBlock != LoopExit.getBlock()) { 962 EmitBlock(ExitBlock); 963 EmitBranchThroughCleanup(LoopExit); 964 } 965 EmitBlock(LoopBody); 966 967 // Emit "IV = LB" (in case of static schedule, we have already calculated new 968 // LB for loop condition and emitted it above). 969 if (DynamicOrOrdered) 970 EmitIgnoredExpr(S.getInit()); 971 972 // Create a block for the increment. 973 auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); 974 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); 975 976 // Generate !llvm.loop.parallel metadata for loads and stores for loops 977 // with dynamic/guided scheduling and without ordered clause. 978 if (!isOpenMPSimdDirective(S.getDirectiveKind())) { 979 LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic || 980 ScheduleKind == OMPC_SCHEDULE_guided) && 981 !Ordered); 982 } else { 983 EmitOMPSimdInit(S); 984 } 985 986 SourceLocation Loc = S.getLocStart(); 987 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(), 988 [&S, LoopExit](CodeGenFunction &CGF) { 989 CGF.EmitOMPLoopBody(S, LoopExit); 990 CGF.EmitStopPoint(&S); 991 }, 992 [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) { 993 if (Ordered) { 994 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd( 995 CGF, Loc, IVSize, IVSigned); 996 } 997 }); 998 999 EmitBlock(Continue.getBlock()); 1000 BreakContinueStack.pop_back(); 1001 if (!DynamicOrOrdered) { 1002 // Emit "LB = LB + Stride", "UB = UB + Stride". 1003 EmitIgnoredExpr(S.getNextLowerBound()); 1004 EmitIgnoredExpr(S.getNextUpperBound()); 1005 } 1006 1007 EmitBranch(CondBlock); 1008 LoopStack.pop(); 1009 // Emit the fall-through block. 1010 EmitBlock(LoopExit.getBlock()); 1011 1012 // Tell the runtime we are done. 1013 if (!DynamicOrOrdered) 1014 RT.emitForStaticFinish(*this, S.getLocEnd()); 1015 } 1016 1017 /// \brief Emit a helper variable and return corresponding lvalue. 
1018 static LValue EmitOMPHelperVar(CodeGenFunction &CGF, 1019 const DeclRefExpr *Helper) { 1020 auto VDecl = cast<VarDecl>(Helper->getDecl()); 1021 CGF.EmitVarDecl(*VDecl); 1022 return CGF.EmitLValue(Helper); 1023 } 1024 1025 static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind> 1026 emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S, 1027 bool OuterRegion) { 1028 // Detect the loop schedule kind and chunk. 1029 auto ScheduleKind = OMPC_SCHEDULE_unknown; 1030 llvm::Value *Chunk = nullptr; 1031 if (auto *C = 1032 cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) { 1033 ScheduleKind = C->getScheduleKind(); 1034 if (const auto *Ch = C->getChunkSize()) { 1035 if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) { 1036 if (OuterRegion) { 1037 const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl()); 1038 CGF.EmitVarDecl(*ImpVar); 1039 CGF.EmitStoreThroughLValue( 1040 CGF.EmitAnyExpr(Ch), 1041 CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar), 1042 ImpVar->getType())); 1043 } else { 1044 Ch = ImpRef; 1045 } 1046 } 1047 if (!C->getHelperChunkSize() || !OuterRegion) { 1048 Chunk = CGF.EmitScalarExpr(Ch); 1049 Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(), 1050 S.getIterationVariable()->getType(), 1051 S.getLocStart()); 1052 } 1053 } 1054 } 1055 return std::make_pair(Chunk, ScheduleKind); 1056 } 1057 1058 bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { 1059 // Emit the loop iteration variable. 1060 auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); 1061 auto IVDecl = cast<VarDecl>(IVExpr->getDecl()); 1062 EmitVarDecl(*IVDecl); 1063 1064 // Emit the iterations count variable. 1065 // If it is not a variable, Sema decided to calculate iterations count on each 1066 // iteration (e.g., it is foldable into a constant). 1067 if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { 1068 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); 1069 // Emit calculation of the iterations count. 1070 EmitIgnoredExpr(S.getCalcLastIteration()); 1071 } 1072 1073 auto &RT = CGM.getOpenMPRuntime(); 1074 1075 bool HasLastprivateClause; 1076 // Check pre-condition. 1077 { 1078 // Skip the entire loop if we don't meet the precondition. 1079 // If the condition constant folds and can be elided, avoid emitting the 1080 // whole loop. 1081 bool CondConstant; 1082 llvm::BasicBlock *ContBlock = nullptr; 1083 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { 1084 if (!CondConstant) 1085 return false; 1086 } else { 1087 auto *ThenBlock = createBasicBlock("omp.precond.then"); 1088 ContBlock = createBasicBlock("omp.precond.end"); 1089 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, 1090 getProfileCount(&S)); 1091 EmitBlock(ThenBlock); 1092 incrementProfileCounter(&S); 1093 } 1094 1095 emitAlignedClause(*this, S); 1096 EmitOMPLinearClauseInit(S); 1097 // Emit 'then' code. 1098 { 1099 // Emit helper vars inits. 
1100 LValue LB = 1101 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable())); 1102 LValue UB = 1103 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable())); 1104 LValue ST = 1105 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable())); 1106 LValue IL = 1107 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); 1108 1109 OMPPrivateScope LoopScope(*this); 1110 if (EmitOMPFirstprivateClause(S, LoopScope)) { 1111 // Emit implicit barrier to synchronize threads and avoid data races on 1112 // initialization of firstprivate variables. 1113 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), 1114 OMPD_unknown); 1115 } 1116 EmitOMPPrivateClause(S, LoopScope); 1117 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); 1118 EmitOMPReductionClauseInit(S, LoopScope); 1119 emitPrivateLoopCounters(*this, LoopScope, S.counters(), 1120 S.private_counters()); 1121 emitPrivateLinearVars(*this, S, LoopScope); 1122 (void)LoopScope.Privatize(); 1123 1124 // Detect the loop schedule kind and chunk. 1125 llvm::Value *Chunk; 1126 OpenMPScheduleClauseKind ScheduleKind; 1127 auto ScheduleInfo = 1128 emitScheduleClause(*this, S, /*OuterRegion=*/false); 1129 Chunk = ScheduleInfo.first; 1130 ScheduleKind = ScheduleInfo.second; 1131 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); 1132 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); 1133 const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr; 1134 if (RT.isStaticNonchunked(ScheduleKind, 1135 /* Chunked */ Chunk != nullptr) && 1136 !Ordered) { 1137 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 1138 EmitOMPSimdInit(S); 1139 } 1140 // OpenMP [2.7.1, Loop Construct, Description, table 2-1] 1141 // When no chunk_size is specified, the iteration space is divided into 1142 // chunks that are approximately equal in size, and at most one chunk is 1143 // distributed to each thread. Note that the size of the chunks is 1144 // unspecified in this case. 1145 RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, 1146 Ordered, IL.getAddress(), LB.getAddress(), 1147 UB.getAddress(), ST.getAddress()); 1148 auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit")); 1149 // UB = min(UB, GlobalUB); 1150 EmitIgnoredExpr(S.getEnsureUpperBound()); 1151 // IV = LB; 1152 EmitIgnoredExpr(S.getInit()); 1153 // while (idx <= UB) { BODY; ++idx; } 1154 EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), 1155 S.getInc(), 1156 [&S, LoopExit](CodeGenFunction &CGF) { 1157 CGF.EmitOMPLoopBody(S, LoopExit); 1158 CGF.EmitStopPoint(&S); 1159 }, 1160 [](CodeGenFunction &) {}); 1161 EmitBlock(LoopExit.getBlock()); 1162 // Tell the runtime we are done. 1163 RT.emitForStaticFinish(*this, S.getLocStart()); 1164 } else { 1165 // Emit the outer loop, which requests its work chunk [LB..UB] from 1166 // runtime and runs the inner loop to process it. 1167 EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered, 1168 LB.getAddress(), UB.getAddress(), ST.getAddress(), 1169 IL.getAddress(), Chunk); 1170 } 1171 EmitOMPReductionClauseFinal(S); 1172 // Emit final copy of the lastprivate variables if IsLastIter != 0. 1173 if (HasLastprivateClause) 1174 EmitOMPLastprivateClauseFinal( 1175 S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart()))); 1176 } 1177 if (isOpenMPSimdDirective(S.getDirectiveKind())) { 1178 EmitOMPSimdFinal(S); 1179 } 1180 // We're now done with the loop, so jump to the continuation block. 
1181 if (ContBlock) { 1182 EmitBranch(ContBlock); 1183 EmitBlock(ContBlock, true); 1184 } 1185 } 1186 return HasLastprivateClause; 1187 } 1188 1189 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { 1190 LexicalScope Scope(*this, S.getSourceRange()); 1191 bool HasLastprivates = false; 1192 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) { 1193 HasLastprivates = CGF.EmitOMPWorksharingLoop(S); 1194 }; 1195 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen); 1196 1197 // Emit an implicit barrier at the end. 1198 if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) { 1199 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); 1200 } 1201 } 1202 1203 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) { 1204 LexicalScope Scope(*this, S.getSourceRange()); 1205 bool HasLastprivates = false; 1206 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) { 1207 HasLastprivates = CGF.EmitOMPWorksharingLoop(S); 1208 }; 1209 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen); 1210 1211 // Emit an implicit barrier at the end. 1212 if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) { 1213 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); 1214 } 1215 } 1216 1217 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, 1218 const Twine &Name, 1219 llvm::Value *Init = nullptr) { 1220 auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); 1221 if (Init) 1222 CGF.EmitScalarInit(Init, LVal); 1223 return LVal; 1224 } 1225 1226 OpenMPDirectiveKind 1227 CodeGenFunction::EmitSections(const OMPExecutableDirective &S) { 1228 auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt(); 1229 auto *CS = dyn_cast<CompoundStmt>(Stmt); 1230 if (CS && CS->size() > 1) { 1231 bool HasLastprivates = false; 1232 auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) { 1233 auto &C = CGF.CGM.getContext(); 1234 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 1235 // Emit helper vars inits. 1236 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", 1237 CGF.Builder.getInt32(0)); 1238 auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1); 1239 LValue UB = 1240 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); 1241 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", 1242 CGF.Builder.getInt32(1)); 1243 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", 1244 CGF.Builder.getInt32(0)); 1245 // Loop counter. 1246 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); 1247 OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); 1248 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); 1249 OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); 1250 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); 1251 // Generate condition for loop. 1252 BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, 1253 OK_Ordinary, S.getLocStart(), 1254 /*fpContractable=*/false); 1255 // Increment for loop counter. 1256 UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, 1257 OK_Ordinary, S.getLocStart()); 1258 auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) { 1259 // Iterate through all sections and emit a switch construct: 1260 // switch (IV) { 1261 // case 0: 1262 // <SectionStmt[0]>; 1263 // break; 1264 // ... 
1265 // case <NumSection> - 1: 1266 // <SectionStmt[<NumSection> - 1]>; 1267 // break; 1268 // } 1269 // .omp.sections.exit: 1270 auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); 1271 auto *SwitchStmt = CGF.Builder.CreateSwitch( 1272 CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB, 1273 CS->size()); 1274 unsigned CaseNumber = 0; 1275 for (auto *SubStmt : CS->children()) { 1276 auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); 1277 CGF.EmitBlock(CaseBB); 1278 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); 1279 CGF.EmitStmt(SubStmt); 1280 CGF.EmitBranch(ExitBB); 1281 ++CaseNumber; 1282 } 1283 CGF.EmitBlock(ExitBB, /*IsFinished=*/true); 1284 }; 1285 1286 CodeGenFunction::OMPPrivateScope LoopScope(CGF); 1287 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { 1288 // Emit implicit barrier to synchronize threads and avoid data races on 1289 // initialization of firstprivate variables. 1290 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), 1291 OMPD_unknown); 1292 } 1293 CGF.EmitOMPPrivateClause(S, LoopScope); 1294 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); 1295 CGF.EmitOMPReductionClauseInit(S, LoopScope); 1296 (void)LoopScope.Privatize(); 1297 1298 // Emit static non-chunked loop. 1299 CGF.CGM.getOpenMPRuntime().emitForInit( 1300 CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32, 1301 /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), 1302 LB.getAddress(), UB.getAddress(), ST.getAddress()); 1303 // UB = min(UB, GlobalUB); 1304 auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart()); 1305 auto *MinUBGlobalUB = CGF.Builder.CreateSelect( 1306 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); 1307 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); 1308 // IV = LB; 1309 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV); 1310 // while (idx <= UB) { BODY; ++idx; } 1311 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen, 1312 [](CodeGenFunction &) {}); 1313 // Tell the runtime we are done. 1314 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart()); 1315 CGF.EmitOMPReductionClauseFinal(S); 1316 1317 // Emit final copy of the lastprivate variables if IsLastIter != 0. 1318 if (HasLastprivates) 1319 CGF.EmitOMPLastprivateClauseFinal( 1320 S, CGF.Builder.CreateIsNotNull( 1321 CGF.EmitLoadOfScalar(IL, S.getLocStart()))); 1322 }; 1323 1324 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen); 1325 // Emit barrier for lastprivates only if 'sections' directive has 'nowait' 1326 // clause. Otherwise the barrier will be generated by the codegen for the 1327 // directive. 1328 if (HasLastprivates && S.getSingleClause(OMPC_nowait)) { 1329 // Emit implicit barrier to synchronize threads and avoid data races on 1330 // initialization of firstprivate variables. 1331 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), 1332 OMPD_unknown); 1333 } 1334 return OMPD_sections; 1335 } 1336 // If only one section is found - no need to generate loop, emit as a single 1337 // region. 1338 bool HasFirstprivates; 1339 // No need to generate reductions for sections with single section region, we 1340 // can use original shared variables for all operations. 1341 bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty(); 1342 // No need to generate lastprivates for sections with single section region, 1343 // we can use original shared variable for all calculations with barrier at 1344 // the end of the sections. 
1345 bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty(); 1346 auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) { 1347 CodeGenFunction::OMPPrivateScope SingleScope(CGF); 1348 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); 1349 CGF.EmitOMPPrivateClause(S, SingleScope); 1350 (void)SingleScope.Privatize(); 1351 1352 CGF.EmitStmt(Stmt); 1353 }; 1354 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), 1355 llvm::None, llvm::None, llvm::None, 1356 llvm::None); 1357 // Emit barrier for firstprivates, lastprivates or reductions only if 1358 // 'sections' directive has 'nowait' clause. Otherwise the barrier will be 1359 // generated by the codegen for the directive. 1360 if ((HasFirstprivates || HasLastprivates || HasReductions) && 1361 S.getSingleClause(OMPC_nowait)) { 1362 // Emit implicit barrier to synchronize threads and avoid data races on 1363 // initialization of firstprivate variables. 1364 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown); 1365 } 1366 return OMPD_single; 1367 } 1368 1369 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { 1370 LexicalScope Scope(*this, S.getSourceRange()); 1371 OpenMPDirectiveKind EmittedAs = EmitSections(S); 1372 // Emit an implicit barrier at the end. 1373 if (!S.getSingleClause(OMPC_nowait)) { 1374 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs); 1375 } 1376 } 1377 1378 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { 1379 LexicalScope Scope(*this, S.getSourceRange()); 1380 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1381 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1382 CGF.EnsureInsertPoint(); 1383 }; 1384 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen); 1385 } 1386 1387 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { 1388 llvm::SmallVector<const Expr *, 8> CopyprivateVars; 1389 llvm::SmallVector<const Expr *, 8> DestExprs; 1390 llvm::SmallVector<const Expr *, 8> SrcExprs; 1391 llvm::SmallVector<const Expr *, 8> AssignmentOps; 1392 // Check if there are any 'copyprivate' clauses associated with this 1393 // 'single' 1394 // construct. 
1395 // Build a list of copyprivate variables along with helper expressions 1396 // (<source>, <destination>, <destination>=<source> expressions) 1397 for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) { 1398 auto *C = cast<OMPCopyprivateClause>(*I); 1399 CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); 1400 DestExprs.append(C->destination_exprs().begin(), 1401 C->destination_exprs().end()); 1402 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); 1403 AssignmentOps.append(C->assignment_ops().begin(), 1404 C->assignment_ops().end()); 1405 } 1406 LexicalScope Scope(*this, S.getSourceRange()); 1407 // Emit code for 'single' region along with 'copyprivate' clauses 1408 bool HasFirstprivates; 1409 auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) { 1410 CodeGenFunction::OMPPrivateScope SingleScope(CGF); 1411 HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); 1412 CGF.EmitOMPPrivateClause(S, SingleScope); 1413 (void)SingleScope.Privatize(); 1414 1415 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1416 CGF.EnsureInsertPoint(); 1417 }; 1418 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), 1419 CopyprivateVars, DestExprs, SrcExprs, 1420 AssignmentOps); 1421 // Emit an implicit barrier at the end (to avoid data race on firstprivate 1422 // init or if no 'nowait' clause was specified and no 'copyprivate' clause). 1423 if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) && 1424 CopyprivateVars.empty()) { 1425 CGM.getOpenMPRuntime().emitBarrierCall( 1426 *this, S.getLocStart(), 1427 S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single); 1428 } 1429 } 1430 1431 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { 1432 LexicalScope Scope(*this, S.getSourceRange()); 1433 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1434 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1435 CGF.EnsureInsertPoint(); 1436 }; 1437 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart()); 1438 } 1439 1440 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { 1441 LexicalScope Scope(*this, S.getSourceRange()); 1442 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1443 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); 1444 CGF.EnsureInsertPoint(); 1445 }; 1446 CGM.getOpenMPRuntime().emitCriticalRegion( 1447 *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart()); 1448 } 1449 1450 void CodeGenFunction::EmitOMPParallelForDirective( 1451 const OMPParallelForDirective &S) { 1452 // Emit directive as a combined directive that consists of two implicit 1453 // directives: 'parallel' with 'for' directive. 1454 LexicalScope Scope(*this, S.getSourceRange()); 1455 (void)emitScheduleClause(*this, S, /*OuterRegion=*/true); 1456 auto &&CodeGen = [&S](CodeGenFunction &CGF) { 1457 CGF.EmitOMPWorksharingLoop(S); 1458 // Emit implicit barrier at the end of parallel region, but this barrier 1459 // is at the end of 'for' directive, so emit it as the implicit barrier for 1460 // this 'for' directive. 
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for simd' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit an implicit barrier at the end of the parallel region. Since it
    // coincides with the end of the 'for' directive, emit it as the implicit
    // barrier for this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    (void)CGF.EmitSections(S);
    // Emit implicit barrier at the end of the parallel region.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
}

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  LexicalScope Scope(*this, S.getSourceRange());
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto *I = CS->getCapturedDecl()->param_begin();
  auto *PartId = std::next(I);
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >=0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  llvm::SmallVector<const Expr *, 8> PrivateVars;
  llvm::SmallVector<const Expr *, 8> PrivateCopies;
  for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        PrivateVars.push_back(*IRef);
        PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
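  // Illustrative example (hypothetical names):
  //   #pragma omp task firstprivate(a)
  //   use(a);
  // The value of 'a' is captured when the task construct is encountered and
  // used to initialize the task's private copy.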
  llvm::SmallVector<const Expr *, 8> FirstprivateVars;
  llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
  llvm::SmallVector<const Expr *, 8> FirstprivateInits;
  for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        FirstprivateVars.push_back(*IRef);
        FirstprivateCopies.push_back(IInit);
        FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef, ++IElemInitRef;
    }
  }
  // Build list of dependences.
  llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
      Dependences;
  for (auto &&I = S.getClausesOfKind(OMPC_depend); I; ++I) {
    auto *C = cast<OMPDependClause>(*I);
    for (auto *IRef : C->varlists()) {
      Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
    }
  }
  auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
      CodeGenFunction &CGF) {
    // Set proper addresses for generated private copies.
    auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
    OMPPrivateScope Scope(CGF);
    if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
      auto *CopyFn = CGF.Builder.CreateAlignedLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
          CGF.PointerAlignInBytes);
      auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
          CGF.PointerAlignInBytes);
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
          PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (auto *E : PrivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        auto *PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr);
      }
      for (auto *E : FirstprivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        auto *PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr);
      }
      CGF.EmitRuntimeCall(CopyFn, CallArgs);
      for (auto &&Pair : PrivatePtrs) {
        auto *Replacement = CGF.Builder.CreateAlignedLoad(
            Pair.second, CGF.PointerAlignInBytes);
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    (void)Scope.Privatize();
    if (*PartId) {
      // TODO: emit code for untied tasks.
    }
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, OMPD_task, CodeGen);
  // Check if we should emit tied or untied task.
  bool Tied = !S.getSingleClause(OMPC_untied);
  // Check if the task is final.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  if (auto *Clause = S.getSingleClause(OMPC_final)) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
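    // For example, a clause like 'final(1)' (illustrative) folds to a known
    // 'true', so the final flag is encoded directly and no runtime branch is
    // emitted for it.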
    auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Final.setInt(CondConstant);
    else
      Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Final.setInt(/*IntVal=*/false);
  }
  auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGM.getOpenMPRuntime().emitTaskCall(
      *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
      CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
      FirstprivateCopies, FirstprivateInits, Dependences);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
    if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
      auto FlushClause = cast<OMPFlushClause>(C);
      return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                FlushClause->varlist_end());
    }
    return llvm::None;
  }(), S.getLocStart());
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
}

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar()
             ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType,
                                        Loc)
             : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
                                                 DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
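    // For instance (illustrative), converting a scalar 'int' value 3 to
    // '_Complex double' produces the pair (3.0, 0.0); the imaginary part is
    // zero-initialized below.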
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    auto ScalarVal = CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                              DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg()) {
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  } else {
    CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
                                             : llvm::Monotonic,
                        LVal.isVolatile(), /*IsInit=*/false);
  }
}

static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
                            QualType RValTy, SourceLocation Loc) {
  switch (CGF.getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    CGF.EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                                   CGF, RVal, RValTy, LVal.getType(), Loc)),
                               LVal);
    break;
  case TEK_Complex:
    CGF.EmitStoreOfComplex(
        convertToComplexValue(CGF, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg()
                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
                   : CGF.EmitAtomicLoad(XLValue, Loc,
                                        IsSeqCst ? llvm::SequentiallyConsistent
                                                 : llvm::Monotonic,
                                        XLValue.isVolatile());
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType(), Loc);
}

static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                   const Expr *X, const Expr *E,
                                   SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  auto &Context = CGF.CGM.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for the 'x' expression is simple, and atomics are supported for the given
  // type on the target platform.
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() ||
      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
                        (Update.getScalarVal()->getType() !=
                         X.getAddress()->getType()->getPointerElementType())) ||
      !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  auto *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress()->getType()->getPointerElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> &CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr binop x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                    const Expr *X, const Expr *E,
                                    const Expr *UE, bool IsXLHSInRHSPart,
                                    SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr binop x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto Gen =
      [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType,
                            SourceLocation Loc) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(
        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr;
    // x = expr binop x; -> expr binop xrval;
    auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide new value, so evaluate it using old
        // value of 'x'.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  emitSimpleStore(CGF, VLValue, NewVVal, NewVValType, Loc);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              bool IsSeqCst, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
    break;
  case OMPC_write:
    EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
  OpenMPClauseKind Kind = OMPC_unknown;
  for (auto *C : S.clauses()) {
    // Find first clause (skip seq_cst clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst) {
      Kind = C->getClauseKind();
      break;
    }
  }

  const auto *CS =
      S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
    enterFullExpression(EWC);
  }
  // Processing for statements under 'atomic capture'.
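  // The associated statement of 'atomic capture' may also be a compound
  // statement with two substatements, e.g. (illustrative):
  //   { v = x; x += expr; }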
  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
    for (const auto *C : Compound->body()) {
      if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
        enterFullExpression(EWC);
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
    EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
                      S.getV(), S.getExpr(), S.getUpdateExpr(),
                      S.isXLHSInRHSPart(), S.getLocStart());
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
  llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
  llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
                                                   S.getCancelRegion());
}

void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(),
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task)
    return ReturnBlock;
  else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections)
    return BreakContinueStack.empty() ? JumpDest()
                                      : BreakContinueStack.back().BreakBlock;
  return JumpDest();
}

// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
    const OMPTargetDataDirective &S) {

  // Emit the code inside the construct for now.
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  CGM.getOpenMPRuntime().emitInlinedDirective(
      *this, OMPD_target_data,
      [&CS](CodeGenFunction &CGF) { CGF.EmitStmt(CS->getCapturedStmt()); });
}