//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;
  auto SrcBegin = SrcAddr;
  auto DestBegin = DestAddr;
  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
  // Cast from pointer to array type to pointer to single element.
  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
                                                         DestBegin->getType());
  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = createBasicBlock("omp.arraycpy.body");
  auto DoneBB = createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);
  auto SrcElementCurrent =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
                                              "omp.arraycpy.destElementPast");
  DestElementCurrent->addIncoming(DestBegin, EntryBB);

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  auto DestElementNext = Builder.CreateConstGEP1_32(
      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto SrcElementNext = Builder.CreateConstGEP1_32(
      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
                                  QualType OriginalType, llvm::Value *DestAddr,
                                  llvm::Value *SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
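      // For example (illustrative), a copy expression like 'dst = src' over
      // 'int a[8]' takes this path and lowers to one aggregate copy (memcpy)
      // of the whole array instead of an element-by-element loop.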
      CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
    } else {
      // For arrays with complex element types, perform element-by-element
      // copying.
      CGF.EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
                                      llvm::Value *SrcElement) {
            // We are working with a single array element, so we have to remap
            // the destination and source variables to the corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(CGF);
            Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
              return DestElement;
            });
            Remap.addPrivate(
                SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
            (void)Remap.Privatize();
            CGF.EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap the pseudo-source variable to the private copy.
    CodeGenFunction::OMPPrivateScope Remap(CGF);
    Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
    Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    CGF.EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto InitsRef = C->inits().begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsFirstprivate.count(OrigVD) == 0) {
        EmittedAsFirstprivate.insert(OrigVD);
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(
            const_cast<VarDecl *>(OrigVD),
            /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                OrigVD) != nullptr,
            (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        auto *OriginalAddr = EmitLValue(&DRE).getAddress();
        QualType Type = OrigVD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in the current
          // captured region.
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            auto Emission = EmitAutoVarAlloca(*VD);
            auto *Init = VD->getInit();
            if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
              // Perform simple memcpy.
              EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
                                  Type);
            } else {
              EmitOMPAggregateAssign(
                  Emission.getAllocatedAddress(), OriginalAddr, Type,
                  [this, VDInit, Init](llvm::Value *DestElement,
                                       llvm::Value *SrcElement) {
                    // Clean up any temporaries needed by the initialization.
                    RunCleanupsScope InitScope(*this);
                    // Emit initialization for single element.
                    LocalDeclMap[VDInit] = SrcElement;
                    EmitAnyExprToMem(Init, DestElement,
                                     Init->getType().getQualifiers(),
                                     /*IsInitializer*/ false);
                    LocalDeclMap.erase(VDInit);
                  });
            }
            EmitAutoVarCleanups(Emission);
            return Emission.getAllocatedAddress();
          });
        } else {
          IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
            // Emit private VarDecl with copy init.
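            // For example (illustrative):
            //   #pragma omp parallel firstprivate(x)
            // allocates a thread-local copy of 'x' here and initializes it
            // from the original variable remapped below.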
            // Remap the temp VDInit variable to the address of the original
            // variable (for proper handling of captured global variables).
            LocalDeclMap[VDInit] = OriginalAddr;
            EmitDecl(*VD);
            LocalDeclMap.erase(VDInit);
            return GetAddrOfLocalVar(VD);
          });
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef, ++InitsRef;
    }
  }
  return !EmittedAsFirstprivate.empty();
}

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
              // Emit private VarDecl with copy init.
              EmitDecl(*VD);
              return GetAddrOfLocalVar(VD);
            });
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
    auto *C = cast<OMPCopyinClause>(*I);
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *AssignOp : C->assignment_ops()) {
      auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable.
        auto *MasterAddr = VD->isStaticLocal()
                               ? CGM.getStaticLocalDeclAddress(VD)
                               : CGM.GetAddrOfGlobal(VD);
        // Get the address of the threadprivate variable.
        auto *PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If
          // it is, no copying is needed.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          Builder.CreateCondBr(
              Builder.CreateICmpNE(
                  Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
                  Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
              CopyBegin, CopyEnd);
          EmitBlock(CopyBegin);
        }
        auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
                    AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
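    // For example (illustrative), for
    //   #pragma omp parallel copyin(tp)
    // with threadprivate 'tp', only non-master threads execute the copy
    // above; the caller then emits the synchronizing barrier.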
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
    HasAtLeastOneLastprivate = true;
    auto *C = cast<OMPLastprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IDestRef = C->destination_exprs().begin();
    for (auto *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the
      // end of the loop.
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
          DeclRefExpr DRE(
              const_cast<VarDecl *>(OrigVD),
              /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
                  OrigVD) != nullptr,
              (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          return EmitLValue(&DRE).getAddress();
        });
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated; initialization of this variable will happen in the
        // codegen for the 'firstprivate' clause.
        if (IInit) {
          auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          bool IsRegistered =
              PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
                // Emit private VarDecl with copy init.
                EmitDecl(*VD);
                return GetAddrOfLocalVar(VD);
              });
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef, ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
  // Emit the following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  auto *ThenBB = createBasicBlock(".omp.lastprivate.then");
  auto *DoneBB = createBasicBlock(".omp.lastprivate.done");
  Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
  EmitBlock(ThenBB);
  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
  const Expr *LastIterVal = nullptr;
  const Expr *IVExpr = nullptr;
  const Expr *IncExpr = nullptr;
  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    LastIterVal =
        cast<VarDecl>(cast<DeclRefExpr>(LoopDirective->getUpperBoundVariable())
                          ->getDecl())
            ->getAnyInitializer();
    IVExpr = LoopDirective->getIterationVariable();
    IncExpr = LoopDirective->getInc();
    auto IUpdate = LoopDirective->updates().begin();
    for (auto *E : LoopDirective->counters()) {
      auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
      LoopCountersAndUpdates[D] = *IUpdate;
      ++IUpdate;
    }
  }
  {
    llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
    bool FirstLCV = true;
    for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
      auto *C = cast<OMPLastprivateClause>(*I);
      auto IRef = C->varlist_begin();
      auto ISrcRef = C->source_exprs().begin();
      auto IDestRef = C->destination_exprs().begin();
      for (auto *AssignOp : C->assignment_ops()) {
        auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
        QualType Type = PrivateVD->getType();
        auto *CanonicalVD = PrivateVD->getCanonicalDecl();
        if (AlreadyEmittedVars.insert(CanonicalVD).second) {
          // If the lastprivate variable is a loop control variable of a
          // loop-based directive, update its value before copying it back to
          // the original variable.
          if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
            if (FirstLCV) {
              EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
                               IVExpr->getType().getQualifiers(),
                               /*IsInitializer=*/false);
              EmitIgnoredExpr(IncExpr);
              FirstLCV = false;
            }
            EmitIgnoredExpr(UpExpr);
          }
          auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
          auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
          // Get the address of the original variable.
          auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
          // Get the address of the private variable.
          auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
          EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
                      AssignOp);
        }
        ++IRef;
        ++ISrcRef;
        ++IDestRef;
      }
    }
  }
  EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    auto *C = cast<OMPReductionClause>(*I);
    auto ILHS = C->lhs_exprs().begin();
    auto IRHS = C->rhs_exprs().begin();
    for (auto IRef : C->varlists()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      // Store the address of the original variable associated with the LHS
      // implicit variable.
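      // For example (illustrative), for 'reduction(+:s)' the LHS helper is
      // mapped here to the original 's', while the RHS helper below becomes
      // the thread-private copy emitted with the reduction's init value.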
      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        IRef->getType(), VK_LValue, IRef->getExprLoc());
        return EmitLValue(&DRE).getAddress();
      });
      // Emit reduction copy.
      bool IsRegistered =
          PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
            // Emit private VarDecl with reduction init.
            EmitDecl(*PrivateVD);
            return GetAddrOfLocalVar(PrivateVD);
          });
      assert(IsRegistered && "private var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
      ++ILHS, ++IRHS;
    }
  }
}

void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D) {
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
    HasAtLeastOneReduction = true;
    auto *C = cast<OMPReductionClause>(*I);
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
  }
  if (HasAtLeastOneReduction) {
    // Emit a nowait reduction if the 'nowait' clause is present or the
    // directive is a parallel directive (it always has an implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
        D.getSingleClause(OMPC_nowait) ||
            isOpenMPParallelDirective(D.getDirectiveKind()));
  }
}

static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
                                           const OMPExecutableDirective &S,
                                           const RegionCodeGenTy &CodeGen) {
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
  auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
      S, *CS->getCapturedDecl()->param_begin(), CodeGen);
  if (auto C = S.getSingleClause(OMPC_num_threads)) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
    auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                         /*IgnoreResultAssign*/ true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getLocStart());
  }
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
                                              CapturedStruct, IfCond);
}

void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins || Firstprivates) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables or propagation of the master
      // thread's values of threadprivate variables to the local instances of
      // those variables in all other implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S);
    // Emit implicit barrier at the end of the 'parallel' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &S,
                                      bool SeparateIter) {
  RunCleanupsScope BodyScope(*this);
  // Update the counters' values for the current iteration.
  for (auto I : S.updates()) {
    EmitIgnoredExpr(I);
  }
  // Update the linear variables.
  for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto U : C->updates()) {
      EmitIgnoredExpr(U);
    }
  }

  // On a continue in the body, jump to the end.
  auto Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(JumpDest(), Continue));
  // Emit loop body.
  EmitStmt(S.getBody());
  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (SeparateIter) {
    // TODO: Update lastprivates if the SeparateIter flag is true.
    // This will be implemented in a follow-up OMPLastprivateClause patch, but
    // the result should still be correct without it, as we do not make these
    // variables private yet.
  }
}

void CodeGenFunction::EmitOMPInnerLoop(
    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  auto LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
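  // (Illustrative) at this point the emitted CFG is:
  //   omp.inner.for.cond -> omp.inner.for.body -> omp.inner.for.inc -> (back)
  // with the condition's exit edge landing in omp.inner.for.end below.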
  EmitBlock(LoopExit.getBlock());
}

void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &S) {
  auto IC = S.counters().begin();
  for (auto F : S.finals()) {
    auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    if (LocalDeclMap.lookup(OrigVD)) {
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
  }
  // Emit the final values of the linear variables.
  for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    auto IC = C->varlist_begin();
    for (auto F : C->finals()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      auto *OrigAddr = EmitLValue(&DRE).getAddress();
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD,
                          [OrigAddr]() -> llvm::Value *{ return OrigAddr; });
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
  }
}

static void EmitOMPAlignedClause(CodeGenFunction &CGF, CodeGenModule &CGM,
                                 const OMPAlignedClause &Clause) {
  unsigned ClauseAlignment = 0;
  if (auto AlignmentExpr = Clause.getAlignment()) {
    auto AlignmentCI =
        cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
    ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
  }
  for (auto E : Clause.varlists()) {
    unsigned Alignment = ClauseAlignment;
    if (Alignment == 0) {
      // OpenMP [2.8.1, Description]
      // If no optional parameter is specified, implementation-defined default
      // alignments for SIMD instructions on the target platforms are assumed.
      Alignment = CGM.getTargetCodeGenInfo().getOpenMPSimdDefaultAlignment(
          E->getType());
    }
    assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
           "alignment is not power of 2");
    if (Alignment != 0) {
      llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
      CGF.EmitAlignmentAssumption(PtrValue, Alignment);
    }
  }
}

static void EmitPrivateLoopCounters(CodeGenFunction &CGF,
                                    CodeGenFunction::OMPPrivateScope &LoopScope,
                                    ArrayRef<Expr *> Counters) {
  for (auto *E : Counters) {
    auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
      // Emit var without initialization.
      auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
      CGF.EmitAutoVarCleanups(VarEmission);
      return VarEmission.getAllocatedAddress();
    });
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
  EmitPrivateLoopCounters(CGF, PreCondScope, S.counters());
  const VarDecl *IVDecl =
      cast<VarDecl>(cast<DeclRefExpr>(S.getIterationVariable())->getDecl());
  bool IsRegistered = PreCondScope.addPrivate(IVDecl, [&]() -> llvm::Value *{
    // Emit var without initialization.
    auto VarEmission = CGF.EmitAutoVarAlloca(*IVDecl);
    CGF.EmitAutoVarCleanups(VarEmission);
    return VarEmission.getAllocatedAddress();
  });
  assert(IsRegistered && "counter already registered as private");
  // Silence the warning about unused variable.
  (void)IsRegistered;
  (void)PreCondScope.Privatize();
  // Initialize the internal counter to 0 to calculate the initial values of
  // the real counters.
  LValue IV = CGF.EmitLValue(S.getIterationVariable());
  CGF.EmitStoreOfScalar(
      llvm::ConstantInt::getNullValue(
          IV.getAddress()->getType()->getPointerElementType()),
      CGF.EmitLValue(S.getIterationVariable()), /*isInit=*/true);
  // Get initial values of real counters.
  for (auto I : S.updates()) {
    CGF.EmitIgnoredExpr(I);
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}

static void
EmitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
                      CodeGenFunction::OMPPrivateScope &PrivateScope) {
  for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
    auto *C = cast<OMPLinearClause>(*I);
    for (auto *E : C->varlists()) {
      auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      bool IsRegistered = PrivateScope.addPrivate(VD, [&]() -> llvm::Value * {
        // Emit var without initialization.
        auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
        CGF.EmitAutoVarCleanups(VarEmission);
        return VarEmission.getAllocatedAddress();
      });
      assert(IsRegistered && "linear var already registered as private");
      // Silence the warning about unused variable.
      (void)IsRegistered;
    }
  }
}

void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    // Codegen for pragma 'simd' depends on the presence of 'lastprivate'.
    // If present, we have to separate the last iteration of the loop:
    //
    // if (PreCond) {
    //   for (IV in 0..LastIteration-1) BODY;
    //   BODY with updates of lastprivate vars;
    //   <Final counter/linear vars updates>;
    // }
    //
    // otherwise (when there's no lastprivate):
    //
    // if (PreCond) {
    //   for (IV in 0..LastIteration) BODY;
    //   <Final counter/linear vars updates>;
    // }
    //

    // Emit: if (PreCond) - begin.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
      ContBlock = CGF.createBasicBlock("simd.if.end");
      emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                  CGF.getProfileCount(&S));
      CGF.EmitBlock(ThenBlock);
      CGF.incrementProfileCounter(&S);
    }
    // Walk clauses and process safelen/lastprivate.
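    // For example (illustrative):
    //   #pragma omp simd safelen(4)
    // sets the vectorizer width to 4 below and clears the parallel flag,
    // since loop-carried dependences at distances up to 'safelen' iterations
    // remain possible.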
    bool SeparateIter = false;
    CGF.LoopStack.setParallel();
    CGF.LoopStack.setVectorizerEnable(true);
    for (auto C : S.clauses()) {
      switch (C->getClauseKind()) {
      case OMPC_safelen: {
        RValue Len = CGF.EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(),
                                     AggValueSlot::ignored(), true);
        llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
        CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
        // In presence of finite 'safelen', it may be unsafe to mark all
        // the memory instructions parallel, because loop-carried
        // dependences of 'safelen' iterations are possible.
        CGF.LoopStack.setParallel(false);
        break;
      }
      case OMPC_aligned:
        EmitOMPAlignedClause(CGF, CGF.CGM, cast<OMPAlignedClause>(*C));
        break;
      case OMPC_lastprivate:
        SeparateIter = true;
        break;
      default:
        // Not handled yet
        ;
      }
    }

    // Emit inits for the linear variables.
    for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
      auto *C = cast<OMPLinearClause>(*I);
      for (auto Init : C->inits()) {
        auto *D = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
        CGF.EmitVarDecl(*D);
      }
    }

    // Emit the loop iteration variable.
    const Expr *IVExpr = S.getIterationVariable();
    const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
    CGF.EmitVarDecl(*IVDecl);
    CGF.EmitIgnoredExpr(S.getInit());

    // Emit the iterations count variable.
    // If it is not a variable, Sema decided to calculate the iterations count
    // on each iteration (e.g., it is foldable into a constant).
    if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
      CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
      // Emit calculation of the iterations count.
      CGF.EmitIgnoredExpr(S.getCalcLastIteration());
    }

    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
      auto *C = cast<OMPLinearClause>(*I);
      if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
        if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
          CGF.EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
          // Emit calculation of the linear step.
          CGF.EmitIgnoredExpr(CS);
        }
    }

    {
      OMPPrivateScope LoopScope(CGF);
      EmitPrivateLoopCounters(CGF, LoopScope, S.counters());
      EmitPrivateLinearVars(CGF, S, LoopScope);
      CGF.EmitOMPPrivateClause(S, LoopScope);
      (void)LoopScope.Privatize();
      CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
                           S.getCond(SeparateIter), S.getInc(),
                           [&S](CodeGenFunction &CGF) {
                             CGF.EmitOMPLoopBody(S);
                             CGF.EmitStopPoint(&S);
                           },
                           [](CodeGenFunction &) {});
      if (SeparateIter) {
        CGF.EmitOMPLoopBody(S, /*SeparateIter=*/true);
      }
    }
    CGF.EmitOMPSimdFinal(S);
    // Emit: if (PreCond) - end.
    if (ContBlock) {
      CGF.EmitBranch(ContBlock);
      CGF.EmitBlock(ContBlock, true);
    }
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
                                          const OMPLoopDirective &S,
                                          OMPPrivateScope &LoopScope,
                                          bool Ordered, llvm::Value *LB,
                                          llvm::Value *UB, llvm::Value *ST,
                                          llvm::Value *IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
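  // For example (illustrative):
  //   #pragma omp for schedule(dynamic, 4)
  // reaches this function: each thread repeatedly requests a 4-iteration
  // chunk via __kmpc_dispatch_next (see the scheme below) until the
  // iteration space is exhausted.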
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);

  assert((Ordered ||
          !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) { BODY; ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  RT.emitForInit(
      *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
      (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
                        : UB),
      ST, Chunk);

  auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  LoopStack.push(CondBlock);

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB)
    EmitIgnoredExpr(S.getEnsureUpperBound());
    // IV = LB
    EmitIgnoredExpr(S.getInit());
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(S.getCond(false));
  } else {
    BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
                                 IL, LB, UB, ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  auto ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  auto LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for the loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(S.getInit());

  // Create a block for the increment.
  auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  SourceLocation Loc = S.getLocStart();
  // Generate !llvm.loop.parallel metadata for loads and stores for loops with
  // dynamic/guided scheduling and without an 'ordered' clause.
  LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                         ScheduleKind == OMPC_SCHEDULE_guided) &&
                        !Ordered);
  EmitOMPInnerLoop(
      S, LoopScope.requiresCleanups(), S.getCond(/*SeparateIter=*/false),
      S.getInc(),
      [&S](CodeGenFunction &CGF) {
        CGF.EmitOMPLoopBody(S);
        CGF.EmitStopPoint(&S);
      },
      [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
        if (Ordered) {
          CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
              CGF, Loc, IVSize, IVSigned);
        }
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(S.getNextLowerBound());
    EmitIgnoredExpr(S.getNextUpperBound());
  }

  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  if (!DynamicOrOrdered)
    RT.emitForStaticFinish(*this, S.getLocEnd());
}

/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
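  // (Illustrative) for 'schedule(static, N)' this returns the emitted value
  // of N paired with OMPC_SCHEDULE_static; with no 'schedule' clause it
  // returns a null chunk and OMPC_SCHEDULE_unknown.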
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  llvm::Value *Chunk = nullptr;
  if (auto *C =
          cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
    ScheduleKind = C->getScheduleKind();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
          const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
          CGF.EmitVarDecl(*ImpVar);
          CGF.EmitStoreThroughLValue(
              CGF.EmitAnyExpr(Ch),
              CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
                                             ImpVar->getType()));
        } else {
          Ch = ImpRef;
        }
      }
      if (!C->getHelperChunkSize() || !OuterRegion) {
        Chunk = CGF.EmitScalarExpr(Ch);
        Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
                                         S.getIterationVariable()->getType());
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKind);
}

bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
  // Emit the loop iteration variable.
  auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  auto &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      auto *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }
    // Emit 'then' code.
    {
      // Emit helper vars inits.
      LValue LB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
      LValue UB =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
                                               OMPD_unknown);
      }
      EmitOMPPrivateClause(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      EmitPrivateLoopCounters(*this, LoopScope, S.counters());
      (void)LoopScope.Privatize();

      // Detect the loop schedule kind and chunk.
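      // (Illustrative) plain 'schedule(static)' with no chunk and no
      // 'ordered' clause takes the fast path below: a single static init, one
      // inner loop over the assigned chunk, and a static fini, with no outer
      // dispatch loop.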
      llvm::Value *Chunk;
      OpenMPScheduleClauseKind ScheduleKind;
      auto ScheduleInfo =
          emitScheduleClause(*this, S, /*OuterRegion=*/false);
      Chunk = ScheduleInfo.first;
      ScheduleKind = ScheduleInfo.second;
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk is
        // distributed to each thread. Note that the size of the chunks is
        // unspecified in this case.
        RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
                       Ordered, IL.getAddress(), LB.getAddress(),
                       UB.getAddress(), ST.getAddress());
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(S.getInit());
        // while (idx <= UB) { BODY; ++idx; }
        EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
                         S.getCond(/*SeparateIter=*/false), S.getInc(),
                         [&S](CodeGenFunction &CGF) {
                           CGF.EmitOMPLoopBody(S);
                           CGF.EmitStopPoint(&S);
                         },
                         [](CodeGenFunction &) {});
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
      EmitOMPReductionClauseFinal(S);
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
    }
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
  return HasLastprivateClause;
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);

  // Emit an implicit barrier at the end.
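  // (Illustrative) 'nowait' normally suppresses this barrier, but it is still
  // emitted when lastprivate copies must be visible to all threads before
  // they leave the region.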
  if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
  }
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &) {
  llvm_unreachable("CodeGen for 'omp for simd' is not supported yet.");
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitScalarInit(Init, LVal);
  return LVal;
}

static OpenMPDirectiveKind emitSections(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S) {
  auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
  auto *CS = dyn_cast<CompoundStmt>(Stmt);
  if (CS && CS->size() > 1) {
    bool HasLastprivates = false;
    auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
      auto &C = CGF.CGM.getContext();
      auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
      // Emit helper vars inits.
      LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                    CGF.Builder.getInt32(0));
      auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
      LValue UB =
          createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
      LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                    CGF.Builder.getInt32(1));
      LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                    CGF.Builder.getInt32(0));
      // Loop counter.
      LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
      OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
      OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
      CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
      // Generate condition for loop.
      BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
                          OK_Ordinary, S.getLocStart(),
                          /*fpContractable=*/false);
      // Increment for loop counter.
      UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
                        OK_Ordinary, S.getLocStart());
      auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
        // Iterate through all sections and emit a switch construct:
        // switch (IV) {
        //   case 0:
        //     <SectionStmt[0]>;
        //     break;
        // ...
        //   case <NumSection> - 1:
        //     <SectionStmt[<NumSection> - 1]>;
        //     break;
        // }
        // .omp.sections.exit:
        auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
        auto *SwitchStmt = CGF.Builder.CreateSwitch(
            CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
            CS->size());
        unsigned CaseNumber = 0;
        for (auto C = CS->children(); C; ++C, ++CaseNumber) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(*C);
          CGF.EmitBranch(ExitBB);
        }
        CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
      };

      CodeGenFunction::OMPPrivateScope LoopScope(CGF);
      if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races on
        // initialization of firstprivate variables.
        CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                   OMPD_unknown);
      }
      CGF.EmitOMPPrivateClause(S, LoopScope);
      HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
      CGF.EmitOMPReductionClauseInit(S, LoopScope);
      (void)LoopScope.Privatize();

      // Emit static non-chunked loop.
      CGF.CGM.getOpenMPRuntime().emitForInit(
          CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
          /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
          LB.getAddress(), UB.getAddress(), ST.getAddress());
      // UB = min(UB, GlobalUB);
      auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
      auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
          CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
      CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
      // IV = LB;
      CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
      // while (idx <= UB) { BODY; ++idx; }
      CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
                           [](CodeGenFunction &) {});
      // Tell the runtime we are done.
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
      CGF.EmitOMPReductionClauseFinal(S);

      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivates)
        CGF.EmitOMPLastprivateClauseFinal(
            S, CGF.Builder.CreateIsNotNull(
                   CGF.EmitLoadOfScalar(IL, S.getLocStart())));
    };

    CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, CodeGen);
    // Emit barrier for lastprivates only if the 'sections' directive has a
    // 'nowait' clause. Otherwise the barrier will be generated by the codegen
    // for the directive.
    if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                                 OMPD_unknown);
    }
    return OMPD_sections;
  }
  // If only one section is found, there is no need to generate a loop; emit
  // it as a single region.
  bool HasFirstprivates;
  // No need to generate reductions for sections with a single section region;
  // we can use the original shared variables for all operations.
  bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
  // No need to generate lastprivates for sections with a single section
  // region; we can use the original shared variable for all calculations with
  // a barrier at the end of the sections.
  bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
  auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();

    CGF.EmitStmt(Stmt);
    CGF.EnsureInsertPoint();
  };
  CGF.CGM.getOpenMPRuntime().emitSingleRegion(CGF, CodeGen, S.getLocStart(),
                                              llvm::None, llvm::None,
                                              llvm::None, llvm::None);
  // Emit barrier for firstprivates, lastprivates or reductions only if the
  // 'sections' directive has a 'nowait' clause. Otherwise the barrier will be
  // generated by the codegen for the directive.
  if ((HasFirstprivates || HasLastprivates || HasReductions) &&
      S.getSingleClause(OMPC_nowait)) {
    // Emit implicit barrier to synchronize threads and avoid data races on
    // initialization of firstprivate variables.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_unknown);
  }
  return OMPD_single;
}

void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  OpenMPDirectiveKind EmittedAs = emitSections(*this, S);
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause(OMPC_nowait)) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
  }
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}

void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination> = <source> expressions).
  for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
    auto *C = cast<OMPCopyprivateClause>(*I);
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  LexicalScope Scope(*this, S.getSourceRange());
  // Emit code for the 'single' region along with the 'copyprivate' clauses.
  bool HasFirstprivates;
  auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
    CodeGenFunction::OMPPrivateScope SingleScope(CGF);
    HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();

    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
                                          CopyprivateVars, DestExprs, SrcExprs,
                                          AssignmentOps);
  // Emit an implicit barrier at the end (to avoid a data race on firstprivate
  // init or if no 'nowait' clause was specified), unless a 'copyprivate'
  // clause is present.
  if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
      CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getLocStart(),
        S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
  }
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitCriticalRegion(
      *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
}

void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  (void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitOMPWorksharingLoop(S);
    // Emit the implicit barrier at the end of the parallel region. Since this
    // barrier is at the end of the 'for' directive, emit it as the implicit
    // barrier for this 'for' directive.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &) {
  llvm_unreachable("CodeGen for 'omp parallel for simd' is not supported yet.");
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    (void)emitSections(CGF, S);
    // Emit implicit barrier at the end of parallel region.
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
                                               OMPD_parallel);
  };
  emitCommonOMPParallelDirective(*this, S, CodeGen);
}

void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  LexicalScope Scope(*this, S.getSourceRange());
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto *I = CS->getCapturedDecl()->param_begin();
  auto *PartId = std::next(I);
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >= 0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
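  // For example (illustrative):
  //   #pragma omp task private(a) firstprivate(b)
  // fills the lists below; they are forwarded to emitTaskCall, which
  // materializes the private copies in the runtime's task data.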
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  LexicalScope Scope(*this, S.getSourceRange());
  auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
  auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
  auto *I = CS->getCapturedDecl()->param_begin();
  auto *PartId = std::next(I);
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >= 0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  llvm::SmallVector<const Expr *, 8> PrivateVars;
  llvm::SmallVector<const Expr *, 8> PrivateCopies;
  for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
    auto *C = cast<OMPPrivateClause>(*I);
    auto IRef = C->varlist_begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        PrivateVars.push_back(*IRef);
        PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  llvm::SmallVector<const Expr *, 8> FirstprivateVars;
  llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
  llvm::SmallVector<const Expr *, 8> FirstprivateInits;
  for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
    auto *C = cast<OMPFirstprivateClause>(*I);
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        FirstprivateVars.push_back(*IRef);
        FirstprivateCopies.push_back(IInit);
        FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef, ++IElemInitRef;
    }
  }
  auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
      CodeGenFunction &CGF) {
    // Set proper addresses for generated private copies.
    auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
    OMPPrivateScope Scope(CGF);
    if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
      auto *CopyFn = CGF.Builder.CreateAlignedLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
          CGF.PointerAlignInBytes);
      auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
          CGF.PointerAlignInBytes);
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
          PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      CallArgs.push_back(PrivatesPtr);
      for (auto *E : PrivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        auto *PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr);
      }
      for (auto *E : FirstprivateVars) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        auto *PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
        PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
        CallArgs.push_back(PrivatePtr);
      }
      CGF.EmitRuntimeCall(CopyFn, CallArgs);
      for (auto &&Pair : PrivatePtrs) {
        auto *Replacement = CGF.Builder.CreateAlignedLoad(
            Pair.second, CGF.PointerAlignInBytes);
        Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
      }
    }
    (void)Scope.Privatize();
    if (*PartId) {
      // TODO: emit code for untied tasks.
    }
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto OutlinedFn =
      CGM.getOpenMPRuntime().emitTaskOutlinedFunction(S, *I, CodeGen);
  // Check if we should emit a tied or an untied task.
  bool Tied = !S.getSingleClause(OMPC_untied);
  // Check if the task is final.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  if (auto *Clause = S.getSingleClause(OMPC_final)) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Final.setInt(CondConstant);
    else
      Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Final.setInt(/*IntVal=*/false);
  }
  auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  if (auto C = S.getSingleClause(OMPC_if)) {
    IfCond = cast<OMPIfClause>(C)->getCondition();
  }
  CGM.getOpenMPRuntime().emitTaskCall(
      *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
      CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
      FirstprivateCopies, FirstprivateInits);
}
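// Illustrative sketch only (simplified; the exact task descriptor layout and
// flags are produced by emitTaskCall in CGOpenMPRuntime). A directive such as
//
//   #pragma omp task firstprivate(x)
//   use(x);
//
// is conceptually lowered with the libomp entry points as:
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, tid, flags, sizeof_task,
//                                         sizeof_shareds, &task_entry);
//   /* copy captured shareds and firstprivate data into t's private area */
//   __kmpc_omp_task(&loc, tid, t);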
void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}

void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
}

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
    if (auto C = S.getSingleClause(/*K=*/OMPC_flush)) {
      auto FlushClause = cast<OMPFlushClause>(C);
      return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                FlushClause->varlist_end());
    }
    return llvm::None;
  }(), S.getLocStart());
}
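// Illustrative note: both '#pragma omp flush' and '#pragma omp flush(a, b)'
// go through emitFlush; with the libomp runtime this typically becomes a call
// to __kmpc_flush(&loc). The variable list, when present, is collected above,
// though the runtime entry point takes no list and flushes the whole memory
// state.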
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
    CGF.EnsureInsertPoint();
  };
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
}

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar()
             ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
             : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
                                                 DestType);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    auto ScalarVal =
        CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg()) {
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  } else {
    CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
                                             : llvm::Monotonic,
                        LVal.isVolatile(), /*IsInit=*/false);
  }
}

static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
                            QualType RValTy) {
  switch (CGF.getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    CGF.EmitStoreThroughLValue(
        RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
        LVal);
    break;
  case TEK_Complex:
    CGF.EmitStoreOfComplex(
        convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg()
                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
                   : CGF.EmitAtomicLoad(XLValue, Loc,
                                        IsSeqCst ? llvm::SequentiallyConsistent
                                                 : llvm::Monotonic,
                                        XLValue.isVolatile());
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
}

static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                   const Expr *X, const Expr *E,
                                   SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
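// Illustrative sketch of the read/write lowering above (simplified; the
// actual IR depends on the type, alignment, and target). For 'int x, v':
//
//   #pragma omp atomic read
//   v = x;
//     -> %0 = load atomic i32, i32* @x monotonic
//        store i32 %0, i32* @v
//
//   #pragma omp atomic write seq_cst
//   x = expr;
//     -> store atomic i32 %expr, i32* @x seq_cst
//        call void @__kmpc_flush(...)   ; implicit flush for seq_cst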
static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  auto &Context = CGF.CGM.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for the 'x' expression is simple, and atomics are supported for the given
  // type on the target platform.
  if (BO == BO_Comma || !Update.isScalar() ||
      !Update.getScalarVal()->getType()->isIntegerTy() ||
      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
                        (Update.getScalarVal()->getType() !=
                         X.getAddress()->getType()->getPointerElementType())) ||
      !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = llvm::AtomicRMWInst::Add;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = llvm::AtomicRMWInst::Sub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                   : llvm::AtomicRMWInst::Max)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                   : llvm::AtomicRMWInst::UMax);
    break;
  case BO_GT:
    RMWOp = X.getType()->hasSignedIntegerRepresentation()
                ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                   : llvm::AtomicRMWInst::Min)
                : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                   : llvm::AtomicRMWInst::UMin);
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  auto *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    UpdateVal = CGF.Builder.CreateIntCast(
        IC, X.getAddress()->getType()->getPointerElementType(),
        X.getType()->hasSignedIntegerRepresentation());
  }
  auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}
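// Illustrative example of the mapping above (for 'int x'):
//
//   #pragma omp atomic
//   x += expr;
//     -> atomicrmw add i32* @x, i32 %expr monotonic
//
// Operations with no atomicrmw equivalent (e.g. 'x *= expr') make this
// helper return {false, nullptr}, and the caller falls back to the generic
// compare-and-swap sequence in EmitOMPAtomicSimpleUpdateExpr below.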
std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> &CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr binop x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                    const Expr *X, const Expr *E,
                                    const Expr *UE, bool IsXLHSInRHSPart,
                                    SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr binop x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto Gen =
      [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
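// Illustrative sketch of the compare-and-swap fallback used when
// emitOMPAtomicRMW declines (e.g. for 'x *= expr'). EmitAtomicUpdate
// produces a loop of roughly this shape:
//
//   old = load atomic x
//   do {
//     desired = CommonGen(old);            // evaluates 'UE' with 'x' = old
//     (old, ok) = cmpxchg x, old, desired  // retry on contention
//   } while (!ok);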
static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}
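// Illustrative examples for the 'atomic capture' lowering below (for
// 'int x, v'):
//
//   #pragma omp atomic capture
//   v = x += expr;    // update-then-capture: NewVVal is the updated value
//
//   #pragma omp atomic capture
//   v = x++;          // postfix form: NewVVal is the old value of 'x'
//
// When the update maps to atomicrmw, the instruction's result (the old
// value) is combined with 'UE' to recover the new value where needed.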
static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr;
    // x = expr binop x; -> expr binop xrval;
    auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    if (Res.first) {
      // An 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use the old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide the new value, so evaluate it using the
        // old value of 'x'.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType());
    auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform an atomicrmw xchg, otherwise a simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    if (Res.first) {
      // An 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of the old/new 'x' value.
  emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}

static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              bool IsSeqCst, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *E,
                              const Expr *UE, bool IsXLHSInRHSPart,
                              SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
    break;
  case OMPC_write:
    EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_mergeable:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
  OpenMPClauseKind Kind = OMPC_unknown;
  for (auto *C : S.clauses()) {
    // Find the first clause (skip the seq_cst clause, if it is first).
    if (C->getClauseKind() != OMPC_seq_cst) {
      Kind = C->getClauseKind();
      break;
    }
  }

  const auto *CS =
      S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
    enterFullExpression(EWC);
  }
  // Processing for statements under 'atomic capture'.
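  // The associated statement of 'atomic capture' may be a compound statement
  // of two expression statements, e.g.:
  //
  //   #pragma omp atomic capture
  //   { v = x; x binop= expr; }   // or { x binop= expr; v = x; }
  //
  // so enter the full expression for each statement in the body.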
  if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
    for (const auto *C : Compound->body()) {
      if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
        enterFullExpression(EWC);
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
    EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
                      S.getV(), S.getExpr(), S.getUpdateExpr(),
                      S.isXLHSInRHSPart(), S.getLocStart());
  };
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen);
}

void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
  llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
}

void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
  llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
}