//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  auto SrcBegin = SrcAddr;
  auto DestBegin = DestAddr;
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
  // Cast from pointer to array type to pointer to single element.
  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
                                                         DestBegin->getType());
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
  auto SrcElementCurrent =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
                                              "omp.arraycpy.destElementPast");
  DestElementCurrent->addIncoming(DestBegin, EntryBB);

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
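
/// \brief Emit code to copy \a SrcAddr to \a DestAddr according to the copy
/// expression \a Copy. For arrays whose copy expression is a plain assignment
/// this degrades to a simple memcpy; arrays with more complex element types
/// are copied element by element; everything else is copied by evaluating
/// \a Copy with the source and destination variables remapped to the given
/// addresses.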
void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
                                  QualType OriginalType, llvm::Value *DestAddr,
                                  llvm::Value *SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      CGF.EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
                                      llvm::Value *SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(CGF);
            Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value * {
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> llvm::Value * { return SrcElement; });
            (void)Remap.Privatize();
            CGF.EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(CGF);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value * { return SrcAddr; });
    Remap.addPrivate(DestVD,
                     [DestAddr]() -> llvm::Value * { return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    CGF.EmitIgnoredExpr(Copy);
  }
}
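
/// \brief Emit the private copies created for 'firstprivate' variables, e.g.
/// for '#pragma omp parallel firstprivate(a)'. Arrays are initialized from the
/// original either with a memcpy (trivial initializer) or element by element;
/// everything else is emitted as a private VarDecl with copy initialization
/// from the original variable. Returns true if any firstprivate copies were
/// actually emitted.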
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        auto *OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
                auto Emission = EmitAutoVarAlloca(*VD);
                auto *Init = VD->getInit();
                if (!isa<CXXConstructExpr>(Init) ||
                    isTrivialInitializer(Init)) {
                  // Perform simple memcpy.
                  EmitAggregateAssign(Emission.getAllocatedAddress(),
                                      OriginalAddr, Type);
                } else {
                  EmitOMPAggregateAssign(
                      Emission.getAllocatedAddress(), OriginalAddr, Type,
                      [this, VDInit, Init](llvm::Value *DestElement,
                                           llvm::Value *SrcElement) {
                        // Clean up any temporaries needed by the
                        // initialization.
                        RunCleanupsScope InitScope(*this);
                        // Emit initialization for single element.
                        LocalDeclMap[VDInit] = SrcElement;
                        EmitAnyExprToMem(Init, DestElement,
                                         Init->getType().getQualifiers(),
                                         /*IsInitializer*/ false);
                        LocalDeclMap.erase(VDInit);
                      });
                }
                EmitAutoVarCleanups(Emission);
                return Emission.getAllocatedAddress();
              });
        } else {
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
                // Emit private VarDecl with copy init.
                // Remap temp VDInit variable to the address of the original
                // variable (for proper handling of captured global variables).
                LocalDeclMap[VDInit] = OriginalAddr;
                EmitDecl(*VD);
                LocalDeclMap.erase(VDInit);
                return GetAddrOfLocalVar(VD);
              });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}
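
/// \brief Emit code for the 'copyin' clause: broadcast the values of the
/// master thread's threadprivate variables to the corresponding copies in all
/// other threads of the team. Returns true if any copyin code was emitted, in
/// which case the caller emits a synchronizing barrier.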
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
    auto *C = cast<OMPCopyinClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        llvm::Value *MasterAddr;
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
                          VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
        } else {
          MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                           : CGM.GetAddrOfGlobal(VD);
        }
        // Get the address of the threadprivate variable.
        auto *PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
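
/// \brief Set up privatization for 'lastprivate' variables: the destination
/// pseudo-variables are mapped to the addresses of the original variables (so
/// the final copy-back can find them), and a private copy is emitted for each
/// variable that is not already handled as firstprivate. Returns true if the
/// directive has at least one 'lastprivate' clause.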
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    HasAtLeastOneLastprivate = true;
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD,
                                [this, OrigVD, IRef]() -> llvm::Value * {
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in
        // codegen for the 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
      LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
                                      LoopDirective->getUpperBoundVariable())
                                      ->getDecl())
                        ->getAnyInitializer();
      IVExpr = LoopDirective->getIterationVariable();
      IncExpr = LoopDirective->getInc();
      auto IUpdate = LoopDirective->updates().begin();
      for (auto *E : LoopDirective->counters()) {
        auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
        LoopCountersAndUpdates[D] = *IUpdate;
        ++IUpdate;
      }
    }
  }
  {
    llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
    bool FirstLCV = true;
    for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
      auto *C = cast<OMPLastprivateClause>(*I);
      auto IRef = C->varlist_begin();
      auto ISrcRef = C->source_exprs().begin();
      auto IDestRef = C->destination_exprs().begin();
      for (auto *AssignOp : C->assignment_ops()) {
        auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
        QualType Type = PrivateVD->getType();
        auto *CanonicalVD = PrivateVD->getCanonicalDecl();
        if (AlreadyEmittedVars.insert(CanonicalVD).second) {
          // If the lastprivate variable is a loop control variable for a
          // loop-based directive, update its value before it is copied back
          // to the original variable.
          if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
            if (FirstLCV && LastIterVal) {
              EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                               IVExpr->getType().getQualifiers(),
                               /*IsInitializer=*/false);
              EmitIgnoredExpr(IncExpr);
              FirstLCV = false;
            }
            EmitIgnoredExpr(UpExpr);
          }
          auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
          auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
          // Get the address of the original variable.
          auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
          // Get the address of the private variable.
          auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
          EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
                      AssignOp);
        }
        ++IRef;
        ++ISrcRef;
        ++IDestRef;
      }
    }
  }
  if (IsLastIterCond) {
    EmitBlock(DoneBB, /*IsFinished=*/true);
  }
}
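
/// \brief Set up privatization for 'reduction' variables: the LHS helper
/// variable is mapped to the address of the original (shared) variable, and
/// the original variable itself is remapped to a private copy initialized
/// with the reduction's initial value.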
void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    auto *C = cast<OMPReductionClause>(*I);
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (auto IRef : C->varlists()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value * {
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value * {
            // Emit private VarDecl with reduction init.
            EmitDecl(*PrivateVD);
            return GetAddrOfLocalVar(PrivateVD);
          });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS, ++IRHS;
    }
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    HasAtLeastOneReduction = true;
    auto *C = cast<OMPReductionClause>(*I);
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause(OMPC_nowait) ||
            isOpenMPParallelDirective(D.getDirectiveKind()) ||
            D.getDirectiveKind() == OMPD_simd,
        D.getDirectiveKind() == OMPD_simd);
  }
}

static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           OpenMPDirectiveKind InnermostKind,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (auto C = S.getSingleClause(OMPC_num_threads)) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  if (auto *C = S.getSingleClause(OMPC_proc_bind)) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto *ProcBindClause = cast<OMPProcBindClause>(C);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedStruct, IfCond);
}
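
/// \brief Emit code for '#pragma omp parallel'. The region body is outlined
/// into a separate function and the OpenMP runtime forks the team and invokes
/// that function (for the libomp-based runtime this is a __kmpc_fork_call, or
/// a serialized call when an 'if' clause evaluates to false).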
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables or on propagation of the
      // master thread's values of threadprivate variables to the local
      // instances of those variables in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counters values on current iteration.
  for (auto I : D.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  // Emit loop body.
  EmitStmt(D.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  // TODO: Update lastprivates if the SeparateIter flag is true.
  // This will be implemented in a follow-up OMPLastprivateClause patch, but
  // result should be still correct without it, as we do not make these
  // variables private yet.
}
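
/// \brief Emit the inner loop shared by the worksharing and simd codegen
/// paths. The emitted control flow is:
///
///   omp.inner.for.cond: if (LoopCond) goto body; else goto end;
///   omp.inner.for.body: BodyGen; goto inc;
///   omp.inner.for.inc:  IncExpr; PostIncGen; goto cond;
///   omp.inner.for.end:
///
/// with an extra block staged before the exit when the scope requires
/// cleanups.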
void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  // Emit inits for the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto Init : C->inits()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      auto *OrigVD = cast<VarDecl>(
          cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      VD->getInit()->getType(), VK_LValue,
                      VD->getInit()->getExprLoc());
      AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
      EmitExprAsInit(&DRE, VD,
                     MakeAddrLValue(Emission.getAllocatedAddress(),
                                    VD->getType(), Emission.Alignment),
                     /*capturedByInit=*/false);
      EmitAutoVarCleanups(Emission);
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
}

static void emitLinearClauseFinal(CodeGenFunction &CGF,
                                  const OMPLoopDirective &D) {
  // Emit the final values of the linear variables.
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    auto IC = C->varlist_begin();
    for (auto F : C->finals()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
      CodeGenFunction::OMPPrivateScope VarScope(CGF);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value * { return OrigAddr; });
      (void)VarScope.Privatize();
      CGF.EmitIgnoredExpr(F);
      ++IC;
    }
  }
}
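
/// \brief Emit alignment assumptions for the pointers listed in 'aligned'
/// clauses. When a clause carries no explicit alignment, the
/// implementation-defined default SIMD alignment for the pointee type is used
/// instead.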
static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) {
    auto *Clause = cast<OMPAlignedClause>(*I);
    unsigned ClauseAlignment = 0;
    if (auto AlignmentExpr = Clause->getAlignment()) {
      auto AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
    }
    for (auto E : Clause->varlists()) {
      unsigned Alignment = ClauseAlignment;
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.EmitAlignmentAssumption(PtrValue, Alignment);
      }
    }
  }
}

static void emitPrivateLoopCounters(CodeGenFunction &CGF,
                                    CodeGenFunction::OMPPrivateScope &LoopScope,
                                    ArrayRef<Expr *> Counters) {
  for (auto *E : Counters) {
    auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value * {
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
      CGF.EmitAutoVarCleanups(VarEmission);
      return VarEmission.getAllocatedAddress();
    });
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (auto I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Check that loop is executed at least one time.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                      CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto *E : C->varlists()) {
      auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> llvm::Value * {
        // Emit var without initialization.
        auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
        CGF.EmitAutoVarCleanups(VarEmission);
        return VarEmission.getAllocatedAddress();
      });
      assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
    }
  }
}

static void emitSafelenClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (auto *C =
          cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(false);
  }
}
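
/// \brief Set up loop metadata for a simd region: the loop is marked parallel
/// and vectorization is enabled, unless a finite 'safelen' clamps the
/// vectorizer width and forces the parallel flag off again.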
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel();
  LoopStack.setVectorizerEnable(true);
  emitSafelenClause(*this, D);
}

void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
  auto IC = D.counters().begin();
  for (auto F : D.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value * { return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  emitLinearClauseFinal(*this, D);
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate iterations count on
    // each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    CGF.EmitOMPSimdInit(S);

    emitAlignedClause(CGF, S);
    CGF.EmitOMPLinearClauseInit(S);
    bool HasLastprivateClause;
    {
      OMPPrivateScope LoopScope(CGF);
      emitPrivateLoopCounters(CGF, LoopScope, S.counters());
      emitPrivateLinearVars(CGF, S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                           S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S, JumpDest());
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      // Emit final copy of the lastprivate variables at the end of loops.
      if (HasLastprivateClause) {
        CGF.EmitOMPLastprivateClauseFinal(S);
      }
      CGF.EmitOMPReductionClauseFinal(S);
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
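
/// \brief Emit the outer dispatch loop used for dynamic, guided, auto and
/// runtime schedules, for ordered loops, and for chunked static schedules.
/// Each iteration of the outer loop obtains the next work chunk [LB, UB] from
/// the runtime (or advances LB/UB by the stride for static schedules) and
/// runs the inner loop over it.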
void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
                                          const OMPLoopDirective &S,
                                          OMPPrivateScope &LoopScope,
                                          bool Ordered, llvm::Value *LB,
                                          llvm::Value *UB, llvm::Value *ST,
                                          llvm::Value *IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  RT.emitForInit(
      *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
      (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
                        : UB),
      ST, Chunk);

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond());
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
    LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                           ScheduleKind == OMPC_SCHEDULE_guided) &&
                          !Ordered);
  } else {
    EmitOMPSimdInit(S);
  }

  SourceLocation Loc = S.getLocStart();
  EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
                   [&S, LoopExit](CodeGenFunction &CGF) {
                     CGF.EmitOMPLoopBody(S, LoopExit);
                     CGF.EmitStopPoint(&S);
                   },
                   [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
                     if (Ordered) {
                       CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
                           CGF, Loc, IVSize, IVSigned);
                     }
                   });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}

/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  llvm::Value *Chunk = nullptr;
  if (auto *C =
          cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
    ScheduleKind = C->getScheduleKind();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
          const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
          CGF.EmitVarDecl(*ImpVar);
          CGF.EmitStoreThroughLValue(
              CGF.EmitAnyExpr(Ch),
              CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
                                             ImpVar->getType()));
        } else {
          Ch = ImpRef;
        }
      }
      if (!C->getHelperChunkSize() || !OuterRegion) {
        Chunk = CGF.EmitScalarExpr(Ch);
        Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
                                         S.getIterationVariable()->getType());
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKind);
}
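
/// \brief Emit a worksharing loop ('omp for' and friends). Static non-chunked,
/// non-ordered schedules are emitted as a single statically divided chunk per
/// thread; all other schedules go through the outer dispatch loop. Returns
/// whether the directive has a 'lastprivate' clause.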
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate iterations count on
  // each iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    EmitOMPLinearClauseInit(S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                               OMPD_unknown);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      emitPrivateLoopCounters(*this, LoopScope, S.counters());
      emitPrivateLinearVars(*this, S, LoopScope);
      (void)LoopScope.Privatize();

      // Detect the loop schedule kind and chunk.
      llvm::Value *Chunk;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo = emitScheduleClause(*this, S, /*OuterRegion=*/false);
      Chunk = ScheduleInfo.first;
      ScheduleKind = ScheduleInfo.second;
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
      if (RT.isStaticNonchunked(ScheduleKind,
                                /*Chunked=*/Chunk != nullptr) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind())) {
          EmitOMPSimdInit(S);
        }
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk
        // is distributed to each thread. Note that the size of the chunks is
        // unspecified in this case.
        RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
                       Ordered, IL.getAddress(), LB.getAddress(),
                       UB.getAddress(), ST.getAddress());
        auto LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
                         S.getInc(),
                         [&S, LoopExit](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S, LoopExit);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
      EmitOMPReductionClauseFinal(S);
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    if (isOpenMPSimdDirective(S.getDirectiveKind())) {
      EmitOMPSimdFinal(S);
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen);

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}
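
/// \brief Emit code for 'sections'. With more than one section the construct
/// is lowered as a statically scheduled loop whose body switches over a
/// synthetic loop counter to pick the section to run; with a single section
/// it degrades to a 'single'-like region. Returns the directive kind the
/// region was actually emitted as, so the caller can emit a matching barrier.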
OpenMPDirectiveKind
CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
  auto *CS = dyn_cast<CompoundStmt>(Stmt);
  if (CS && CS->size() > 1) {
    bool HasLastprivates = false;
    auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
      auto &C = CGF.CGM.getContext();
      auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
      // Emit helper vars inits.
      LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                    CGF.Builder.getInt32(0));
      auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
      LValue UB =
          createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
      LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                    CGF.Builder.getInt32(1));
      LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                    CGF.Builder.getInt32(0));
      // Loop counter.
      LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
      OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
      OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
      // Generate condition for loop.
      BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                          OK_Ordinary, S.getLocStart(),
                          /*fpContractable=*/false);
      // Increment for loop counter.
      UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
                        OK_Ordinary, S.getLocStart());
      auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
        // Iterate through all sections and emit a switch construct:
        // switch (IV) {
        //   case 0:
        //     <SectionStmt[0]>;
        //     break;
        //   ...
        //   case <NumSection> - 1:
        //     <SectionStmt[<NumSection> - 1]>;
        //     break;
        // }
        // .omp.sections.exit:
        auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
        auto *SwitchStmt = CGF.Builder.CreateSwitch(
            CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
            CS->size());
        unsigned CaseNumber = 0;
        for (auto *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
        CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
      };

      CodeGenFunction::OMPPrivateScope LoopScope(CGF);
      if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                   OMPD_unknown);
      }
      CGF.EmitOMPPrivateClause(S, LoopScope);
      HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();

      // Emit static non-chunked loop.
      CGF.CGM.getOpenMPRuntime().emitForInit(
          CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
          /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
          LB.getAddress(), UB.getAddress(), ST.getAddress());
      // UB = min(UB, GlobalUB);
      auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
      auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
          CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
      CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
      // IV = LB;
      CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
      // while (idx <= UB) { BODY; ++idx; }
      CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                           [](CodeGenFunction &) {});
      // Tell the runtime we are done.
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
      CGF.EmitOMPReductionClauseFinal(S);

      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivates)
        CGF.EmitOMPLastprivateClauseFinal(
            S, CGF.Builder.CreateIsNotNull(
                   CGF.EmitLoadOfScalar(IL, S.getLocStart())));
    };

    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen);
    // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
    // clause. Otherwise the barrier will be generated by the codegen for the
    // directive.
    if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables.
      CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                             OMPD_unknown);
    }
    return OMPD_sections;
  }
  // If only one section is found, there is no need to generate a loop; emit
  // it as a single region.
  bool HasFirstprivates = false;
  // No need to generate reductions for sections with single section region,
  // we can use original shared variables for all operations.
  bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
  // No need to generate lastprivates for sections with single section region,
  // we can use original shared variable for all calculations with barrier at
  // the end of the sections.
  bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
  auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();

    CGF.EmitStmt(Stmt);
  };
  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          llvm::None, llvm::None, llvm::None,
                                          llvm::None);
  // Emit barrier for firstprivates, lastprivates or reductions only if
  // 'sections' directive has 'nowait' clause. Otherwise the barrier will be
  // generated by the codegen for the directive.
  if ((HasFirstprivates || HasLastprivates || HasReductions) &&
      S.getSingleClause(OMPC_nowait)) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                           OMPD_unknown);
  }
  return OMPD_single;
}

void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  OpenMPDirectiveKind EmittedAs = EmitSections(S);
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait)) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
  }
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen);
}

void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this 'single'
  // construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions).
  for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
    auto *C = cast<OMPCopyprivateClause>(*I);
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit code for 'single' region along with 'copyprivate' clauses.
  bool HasFirstprivates = false;
  auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();

    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          CopyprivateVars, DestExprs, SrcExprs,
                                          AssignmentOps);
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
      CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getLocStart(),
        S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
  }
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitCriticalRegion(
      *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit implicit barrier at the end of parallel region, but this barrier
    // is at the end of 'for' directive, so emit it as the implicit barrier for
    // this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for simd' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit implicit barrier at the end of parallel region, but this barrier
    // is at the end of 'for' directive, so emit it as the implicit barrier
    // for this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    (void)CGF.EmitSections(S);
    // Emit implicit barrier at the end of parallel region.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
}

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  LexicalScope Scope(*this, S.getSourceRange());
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto *I = CS->getCapturedDecl()->param_begin();
  auto *PartId = std::next(I);
  // The first function argument for tasks is a thread id, the second one is
  // a part id (0 for tied tasks, >= 0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  llvm::SmallVector<const Expr *, 8> PrivateVars;
  llvm::SmallVector<const Expr *, 8> PrivateCopies;
  for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        PrivateVars.push_back(*IRef);
        PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
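  // E.g. for '#pragma omp task firstprivate(x)' the value of 'x' is
  // captured when the task is created; the three lists below carry the
  // original reference, the private copy, and its initializer.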
  llvm::SmallVector<const Expr *, 8> FirstprivateVars;
  llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
  llvm::SmallVector<const Expr *, 8> FirstprivateInits;
  for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        FirstprivateVars.push_back(*IRef);
        FirstprivateCopies.push_back(IInit);
        FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef, ++IElemInitRef;
    }
  }
  // Build list of dependences.
  llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
      Dependences;
  for (auto &&I = S.getClausesOfKind(OMPC_depend); I; ++I) {
    auto *C = cast<OMPDependClause>(*I);
    for (auto *IRef : C->varlists()) {
      Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
    }
  }
  auto &&CodeGen = [PartId, &S, &PrivateVars,
                    &FirstprivateVars](CodeGenFunction &CGF) {
    // Set proper addresses for generated private copies.
    auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
    OMPPrivateScope Scope(CGF);
    if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
      auto *CopyFn = CGF.Builder.CreateAlignedLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
          CGF.PointerAlignInBytes);
      auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
          CGF.PointerAlignInBytes);
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
          PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (auto *E : PrivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        auto *PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr);
      }
      for (auto *E : FirstprivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        auto *PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr);
      }
      CGF.EmitRuntimeCall(CopyFn, CallArgs);
      for (auto &&Pair : PrivatePtrs) {
        auto *Replacement = CGF.Builder.CreateAlignedLoad(
            Pair.second, CGF.PointerAlignInBytes);
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    (void)Scope.Privatize();
    if (*PartId) {
      // TODO: emit code for untied tasks.
    }
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, OMPD_task, CodeGen);
  // Check if we should emit a tied or untied task.
  bool Tied = !S.getSingleClause(OMPC_untied);
  // Check if the task is final.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  if (auto *Clause = S.getSingleClause(OMPC_final)) {
    // If the condition constant folds and can be elided, try to avoid
    // emitting the condition and the dead arm of the if/else.
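    // E.g. '#pragma omp task final(true)' folds to a constant, so the
    // 'final' flag can be set directly without emitting a runtime check.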
    auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Final.setInt(CondConstant);
    else
      Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Final.setInt(/*IntVal=*/false);
  }
  auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGM.getOpenMPRuntime().emitTaskCall(
      *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
      CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
      FirstprivateCopies, FirstprivateInits, Dependences);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPTaskgroupDirective(
    const OMPTaskgroupDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
    if (auto C = S.getSingleClause(/*K=*/OMPC_flush)) {
      auto FlushClause = cast<OMPFlushClause>(C);
      return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                FlushClause->varlist_end());
    }
    return llvm::None;
  }(), S.getLocStart());
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
}

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar()
             ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
             : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
                                                 DestType);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
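    // E.g. when converting a 'double' to a '_Complex float' location, the
    // converted scalar becomes the real part and the imaginary part is
    // zero, as built below.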
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    auto ScalarVal =
        CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg()) {
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  } else {
    CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
                                             : llvm::Monotonic,
                        LVal.isVolatile(), /*IsInit=*/false);
  }
}

static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
                            QualType RValTy) {
  switch (CGF.getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    CGF.EmitStoreThroughLValue(
        RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
        LVal);
    break;
  case TEK_Complex:
    CGF.EmitStoreOfComplex(
        convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg()
                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
                   : CGF.EmitAtomicLoad(XLValue, Loc,
                                        IsSeqCst ? llvm::SequentiallyConsistent
                                                 : llvm::Monotonic,
                                        XLValue.isVolatile());
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
}

static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                   const Expr *X, const Expr *E,
                                   SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
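  // E.g. for:
  //
  //   #pragma omp atomic write seq_cst
  //   x = expr;
  //
  // the store above is atomic, and the 'seq_cst' clause adds the flush
  // emitted below.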
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  auto &Context = CGF.CGM.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for the 'x' expression is simple, and atomics are supported for the
  // given type on the target platform.
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
      (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
       (Update.getScalarVal()->getType() !=
        X.getAddress()->getType()->getPointerElementType())) ||
      !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
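  // A plain assignment 'x = expr' (BO_Assign) maps to an atomic exchange;
  // 'atomicrmw xchg' also returns the old value, which the 'atomic capture'
  // codegen below relies on.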
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  auto *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress()->getType()->getPointerElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> &CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr Op x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                    const Expr *X, const Expr *E,
                                    const Expr *UE, bool IsXLHSInRHSPart,
                                    SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr Op x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
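  // The generator below re-emits the update expression with the opaque
  // values bound: 'x' to the incoming RValue and 'expr' to its already
  // computed value. E.g. for 'x += expr' it evaluates 'xrval + expr'.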
  auto Gen =
      [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr;
    // x = expr Op x; -> expr binop xrval;
    auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide the new value, so evaluate it using
        // the old value of 'x'.
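        // E.g. for the prefix form 'v = ++x;' the captured value is the
        // updated one, so re-evaluate 'xrval + 1' with 'xrval' bound to
        // the old value returned by 'atomicrmw'.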
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType());
    auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              bool IsSeqCst, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
    break;
  case OMPC_write:
    EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
  OpenMPClauseKind Kind = OMPC_unknown;
  for (auto *C : S.clauses()) {
    // Find first clause (skip seq_cst clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst) {
      Kind = C->getClauseKind();
      break;
    }
  }

  const auto *CS =
      S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
    enterFullExpression(EWC);
  }
  // Processing for statements under 'atomic capture'.
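  // E.g. the two-statement capture form:
  //
  //   #pragma omp atomic capture
  //   { v = x; x = expr; }
  //
  // arrives as a CompoundStmt whose statements may each carry cleanups
  // that must be entered before emission.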
  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
    for (const auto *C : Compound->body()) {
      if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
        enterFullExpression(EWC);
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
    EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
                      S.getV(), S.getExpr(), S.getUpdateExpr(),
                      S.isXLHSInRHSPart(), S.getLocStart());
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
  llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
  llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
}

void CodeGenFunction::EmitOMPCancellationPointDirective(
    const OMPCancellationPointDirective &S) {
  CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
                                                   S.getCancelRegion());
}

void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
  CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(),
                                        S.getCancelRegion());
}

CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
  if (Kind == OMPD_parallel || Kind == OMPD_task)
    return ReturnBlock;
  else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections)
    return BreakContinueStack.empty() ? JumpDest()
                                      : BreakContinueStack.back().BreakBlock;
  return JumpDest();
}
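// Note: cancellation inside 'parallel' or 'task' branches to the function's
// return block, while cancellation inside a worksharing region (e.g.
// '#pragma omp cancel for') branches to the innermost break block, as
// computed by getOMPCancelDestination above.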