//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

static const VarDecl *getBaseDecl(const Expr *Ref);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion)
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
      }
    }
    (void)InlinedShareds.Privatize();
  }
};
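// Example: for a directive such as
//   #pragma omp parallel num_threads(NumThreads + 1)
// Sema captures the clause expression into an implicit OMPCapturedExprDecl;
// emitPreInitStmt() above materializes such pre-init declarations before the
// region body is emitted, so clause codegen can refer to a stable local value.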

/// Lexical scope for OpenMP parallel construct that handles correct codegen
/// for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !(isOpenMPTargetExecutionDirective(Kind) ||
             isOpenMPLoopBoundSharingDirective(Kind)) &&
           isOpenMPParallelDirective(Kind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind Kind = S.getDirectiveKind();
    return !isOpenMPTargetExecutionDirective(Kind) &&
           isOpenMPTeamsDirective(Kind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// of expressions used in the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const DeclStmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlists()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(OrigVDTy))),
                        CGF.ConvertTypeForMem(OrigVDTy),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
    } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
      PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      for (const auto *I : PreInits->decls())
        CGF.EmitVarDecl(cast<VarDecl>(*I));
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};
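// Example: for a transformed loop nest such as
//   #pragma omp tile sizes(4)
//   for (int i = 0; i < N; ++i) ...
// the transformation's pre-init declarations (e.g. computed trip counts) are
// emitted by the scope above; for plain loop directives, the loop counters are
// temporarily remapped to fresh temporaries so the loop precondition can be
// evaluated without clobbering the original variables.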

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (CGF.CurCodeDecl && isa<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlists()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(S.getDirectiveKind()))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted as they are
    // not used in simd only mode.
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size =
          Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}
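// For example, given 'double a[n][m]' the loop above accumulates
// Size = n * m, and the result is Size * sizeof(double). The multiplies are
// emitted as NUW since VLA extents are non-negative.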

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.getPointer(), Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
    }
  }
}
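// For a non-pointer by-copy capture of 'int X' this produces, on a 64-bit
// target, roughly:
//   %x.casted = alloca i64            ; uintptr-sized temporary
//   store i32 %x, ptr %x.casted       ; store through the source type
//   %1 = load i64, ptr %x.casted      ; reload as uintptr for the runtime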

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*UsesFPIntrin=*/false, /*isInlineSpecified=*/false,
        /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly cast to
    // uintptr. This is necessary given that the runtime library is only able
    // to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (CapVar && (CapVar->getTLSKind() != clang::VarDecl::TLS_None)) {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType,
                                      ImplicitParamDecl::ThreadPrivateVar);
    } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamDecl::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Always inline the outlined function if optimizations are enabled.
  if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
    F->removeFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything;
    // just use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress(CGF);
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress(CGF)}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}
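// As a rough sketch: outlining a parallel region that captures 'int N' by
// copy and a VLA 'double A[N]' yields an argument list along the lines of
//   (i32* %.global_tid., i32* %.bound_tid., i64 %N, i64 %vla, ptr %A)
// where the i64 arguments travel as uintptr values and are cast back to
// their original types on entry (see castValueFromUintptr above).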

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();
  if (NeedWrapperFunction)
    Out << "_debug__";
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(*this, Args, LocalAddrs,
                                                   VLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : LocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first,
                            LocalAddrPair.second.second);
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : VLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                            /*RegisterCastedArgsOnly=*/true,
                            CapturedStmtInfo->getHelperName(), Loc);
  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
  Args.clear();
  LocalAddrs.clear();
  VLASizes.clear();
  llvm::Function *WrapperF =
      emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                   WrapperCGF.CXXThisValue, WrapperFO);
  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            LV.getAddress(WrapperCGF),
            PI->getType()->getPointerTo(
                LV.getAddress(WrapperCGF).getAddressSpace()),
            PI->getType()));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV =
            WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                      Arg->getType(), AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}
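// In short: with debug info enabled, the body lands in '<helper>_debug__'
// using the captures' original types (friendlier for debuggers), while the
// runtime-facing '<helper>' keeps the uintptr ABI and simply forwards its
// arguments to the debug version.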

//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.getPointer();
  llvm::Value *DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd =
      Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI, SrcAddr.getElementType(),
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext =
      Builder.CreateConstGEP1_32(DestAddr.getElementType(), DestElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext =
      Builder.CreateConstGEP1_32(SrcAddr.getElementType(), SrcElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
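// The emitted control flow is, roughly:
//   entry:              br (DestBegin == DestEnd), omp.arraycpy.done, body
//   omp.arraycpy.body:  phis for the current src/dest element pointers;
//                       CopyGen(dest, src); advance both pointers by one;
//                       br (next == DestEnd), done, body
//   omp.arraycpy.done: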

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, DestElement);
            Remap.addPrivate(SrcVD, SrcElement);
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, SrcAddr);
    Remap.addPrivate(DestVD, DestAddr);
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool DeviceConstTarget =
      getLangOpts().OpenMPIsDevice &&
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlists())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, D.getDirectiveKind());
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function, e.g. omp for, omp simd, omp distribute.
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target regions,
      // captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
          const Expr *Init = VD->getInit();
          if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
            EmitAggregateAssign(Dest, OriginalLVal, Type);
          } else {
            EmitOMPAggregateAssign(
                Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
                Type,
                [this, VDInit, Init](Address DestElement, Address SrcElement) {
                  // Clean up any temporaries needed by the
                  // initialization.
                  RunCleanupsScope InitScope(*this);
                  // Emit initialization for single element.
                  setAddrOfLocalVar(VDInit, SrcElement);
                  EmitAnyExprToMem(Init, DestElement,
                                   Init->getType().getQualifiers(),
                                   /*IsInitializer*/ false);
                  LocalDeclMap.erase(VDInit);
                });
          }
          EmitAutoVarCleanups(Emission);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
        } else {
          Address OriginalAddr = OriginalLVal.getAddress(*this);
          // Emit private VarDecl with copy init.
          // Remap temp VDInit variable to the address of the original
          // variable (for proper handling of captured global variables).
          setAddrOfLocalVar(VDInit, OriginalAddr);
          EmitDecl(*VD);
          LocalDeclMap.erase(VDInit);
          Address VDAddr = GetAddrOfLocalVar(VD);
          if (ThisFirstprivateIsLastprivate &&
              Lastprivates[OrigVD->getCanonicalDecl()] ==
                  OMPC_LASTPRIVATE_conditional) {
            // Create/init special variable for lastprivate conditionals.
            llvm::Value *V =
                EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl),
                                 (*IRef)->getExprLoc());
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl));
            LocalDeclMap.erase(VD);
            setAddrOfLocalVar(VD, VDAddr);
          }
          IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}
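// Note: a true return value (e.g. for '#pragma omp for firstprivate(x)
// lastprivate(x)') tells callers to emit an implicit barrier so that every
// thread's firstprivate copy is initialized before any lastprivate copy-back
// can run.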

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        EmitDecl(*VD);
        // Emit private VarDecl with copy init.
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress(*this);
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      CGM.getTypes().ConvertTypeForMem(VD->getType()),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
        if (CopiedVars.size() == 1) {
          // First check whether the current thread is the master thread. If it
          // is, there is no need to copy data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          // TODO: Avoid ptrtoint conversion.
          auto *MasterAddrInt =
              Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
          auto *PrivateAddrInt =
              Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
          Builder.CreateCondBr(
              Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
              CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the end
      // of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done in the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in codegen
        // for 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          Address VDAddr = Address::invalid();
          if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            setAddrOfLocalVar(VD, VDAddr);
          } else {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            VDAddr = GetAddrOfLocalVar(VD);
          }
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr = Address(
              Builder.CreateLoad(PrivateAddr),
              CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
              CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

void CodeGenFunction::EmitOMPReductionClauseInit(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
  if (!HaveInsertPoint())
    return;
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  OMPTaskDataTy Data;
  SmallVector<const Expr *, 4> TaskLHSs;
  SmallVector<const Expr *, 4> TaskRHSs;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    if (C->getModifier() == OMPC_REDUCTION_task) {
      Data.ReductionVars.append(C->privates().begin(), C->privates().end());
      Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
      Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
      Data.ReductionOps.append(C->reduction_ops().begin(),
                               C->reduction_ops().end());
      TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
      TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    }
  }
  ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
  unsigned Count = 0;
  auto *ILHS = LHSs.begin();
  auto *IRHS = RHSs.begin();
  auto *IPriv = Privates.begin();
  for (const Expr *IRef : Shareds) {
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
    // Emit private VarDecl with reduction init.
    RedCG.emitSharedOrigLValue(*this, Count);
    RedCG.emitAggregateType(*this, Count);
    AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
    RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
                             RedCG.getSharedLValue(Count).getAddress(*this),
                             [&Emission](CodeGenFunction &CGF) {
                               CGF.EmitAutoVarInit(Emission);
                               return true;
                             });
    EmitAutoVarCleanups(Emission);
    Address BaseAddr = RedCG.adjustPrivateAddress(
        *this, Count, Emission.getAllocatedAddress());
    bool IsRegistered =
        PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
    assert(IsRegistered && "private var already registered as private");
    // Silence the warning about unused variable.
    (void)IsRegistered;

    const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
    const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
    QualType Type = PrivateVD->getType();
    bool isaOMPArraySectionExpr = isa<OMPArraySectionExpr>(IRef);
    if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
    } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
               isa<ArraySubscriptExpr>(IRef)) {
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      PrivateScope.addPrivate(LHSVD,
                              RedCG.getSharedLValue(Count).getAddress(*this));
      PrivateScope.addPrivate(RHSVD, Builder.CreateElementBitCast(
                                         GetAddrOfLocalVar(PrivateVD),
                                         ConvertTypeForMem(RHSVD->getType()),
                                         "rhs.begin"));
    } else {
      QualType Type = PrivateVD->getType();
      bool IsArray = getContext().getAsArrayType(Type) != nullptr;
      Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
      // Store the address of the original variable associated with the LHS
      // implicit variable.
      if (IsArray) {
        OriginalAddr = Builder.CreateElementBitCast(
            OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
      }
      PrivateScope.addPrivate(LHSVD, OriginalAddr);
      PrivateScope.addPrivate(
          RHSVD, IsArray ? Builder.CreateElementBitCast(
                               GetAddrOfLocalVar(PrivateVD),
                               ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
                         : GetAddrOfLocalVar(PrivateVD));
    }
    ++ILHS;
    ++IRHS;
    ++IPriv;
    ++Count;
  }
  if (!Data.ReductionVars.empty()) {
    Data.IsReductionWithTaskMod = true;
    Data.IsWorksharingReduction =
        isOpenMPWorksharingDirective(D.getDirectiveKind());
    llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
        *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
    const Expr *TaskRedRef = nullptr;
    switch (D.getDirectiveKind()) {
    case OMPD_parallel:
      TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_for:
      TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_sections:
      TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_for:
      TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_master:
      TaskRedRef =
          cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_parallel_sections:
      TaskRedRef =
          cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel:
      TaskRedRef =
          cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_target_parallel_for:
      TaskRedRef =
          cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_distribute_parallel_for:
      TaskRedRef =
          cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
      break;
    case OMPD_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_target_teams_distribute_parallel_for:
      TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
                       .getTaskReductionRefExpr();
      break;
    case OMPD_simd:
    case OMPD_for_simd:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_parallel_for_simd:
    case OMPD_task:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_flush:
    case OMPD_depobj:
    case OMPD_scan:
    case OMPD_ordered:
    case OMPD_atomic:
    case OMPD_teams:
    case OMPD_target:
    case OMPD_cancellation_point:
    case OMPD_cancel:
    case OMPD_target_data:
    case OMPD_target_enter_data:
    case OMPD_target_exit_data:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_master_taskloop:
    case OMPD_master_taskloop_simd:
    case OMPD_parallel_master_taskloop:
    case OMPD_parallel_master_taskloop_simd:
    case OMPD_distribute:
    case OMPD_target_update:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_distribute_simd:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_teams:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_threadprivate:
    case OMPD_allocate:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_declare_simd:
    case OMPD_requires:
    case OMPD_declare_variant:
    case OMPD_begin_declare_variant:
    case OMPD_end_declare_variant:
    case OMPD_unknown:
    default:
1397 llvm_unreachable("Enexpected directive with task reductions.");
1398 }
1399
1400 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
1401 EmitVarDecl(*VD);
1402 EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
1403 /*Volatile=*/false, TaskRedRef->getType());
1404 }
1405 }
1406
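// Emits the final reduction of the threads' private copies back into the
// original list items. For example, '#pragma omp parallel for reduction(+ : sum)'
// ends up in a runtime reduction call (__kmpc_reduce/__kmpc_reduce_nowait in
// the default libomp runtime) followed by the merge of the private copies.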
void CodeGenFunction::EmitOMPReductionClauseFinal(
    const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
  if (!HaveInsertPoint())
    return;
  llvm::SmallVector<const Expr *, 8> Privates;
  llvm::SmallVector<const Expr *, 8> LHSExprs;
  llvm::SmallVector<const Expr *, 8> RHSExprs;
  llvm::SmallVector<const Expr *, 8> ReductionOps;
  bool HasAtLeastOneReduction = false;
  bool IsReductionWithTaskMod = false;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    // Do not emit for inscan reductions.
    if (C->getModifier() == OMPC_REDUCTION_inscan)
      continue;
    HasAtLeastOneReduction = true;
    Privates.append(C->privates().begin(), C->privates().end());
    LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    IsReductionWithTaskMod =
        IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
  }
  if (HasAtLeastOneReduction) {
    if (IsReductionWithTaskMod) {
      CGM.getOpenMPRuntime().emitTaskReductionFini(
          *this, D.getBeginLoc(),
          isOpenMPWorksharingDirective(D.getDirectiveKind()));
    }
    bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
                      isOpenMPParallelDirective(D.getDirectiveKind()) ||
                      ReductionKind == OMPD_simd;
    bool SimpleReduction = ReductionKind == OMPD_simd;
    // Emit nowait reduction if nowait clause is present or directive is a
    // parallel directive (it always has implicit barrier).
    CGM.getOpenMPRuntime().emitReduction(
        *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
        {WithNowait, SimpleReduction, ReductionKind});
  }
}

static void emitPostUpdateForReductionClause(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!CGF.HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(CGF)) {
          // When the first post-update expression is found, emit the
          // conditional block if it was requested.
          llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
          DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
          CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          CGF.EmitBlock(ThenBB);
        }
      }
      CGF.EmitIgnoredExpr(PostUpdate);
    }
  }
  if (DoneBB)
    CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

namespace {
/// Codegen lambda for appending distribute lower and upper bounds to outlined
/// parallel function. This is necessary for combined constructs such as
/// 'distribute parallel for'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace

static void
checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  if (CGF.getLangOpts().OpenMP < 50)
    return;
  llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
    }
  }
  // Privates should not be analyzed since they are not captured at all.
  // Task reductions may be skipped - tasks are ignored.
  // Firstprivates do not return a value but may be passed by reference - no
  // need to check for updated lastprivate conditional.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    for (const Expr *Ref : C->varlists()) {
      if (!Ref->getType()->isScalarType())
        continue;
      const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
      if (!DRE)
        continue;
      PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
    }
  }
  CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
      CGF, S, PrivateDecls);
}

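// Common codegen for 'parallel' and combined parallel constructs: emits the
// outlined function, evaluates the num_threads/proc_bind/if clauses, and
// issues the runtime parallel call (__kmpc_fork_call in the default libomp
// runtime). For example, '#pragma omp parallel num_threads(4) if(cond)'
// reaches this helper with InnermostKind == OMPD_parallel.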
static void emitCommonOMPParallelDirective(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    const CodeGenBoundParametersTy &CodeGenBoundParameters) {
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
  llvm::Value *NumThreads = nullptr;
  llvm::Function *OutlinedFn =
      CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
          S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
  if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
    CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
    NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                    /*IgnoreResultAssign=*/true);
    CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
        CGF, NumThreads, NumThreadsClause->getBeginLoc());
  }
  if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
    CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
    CGF.CGM.getOpenMPRuntime().emitProcBindClause(
        CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
  }
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_parallel) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPParallelScope Scope(CGF, S);
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  // Combining 'distribute' with 'for' requires sharing each 'distribute'
  // chunk's lower and upper bounds with the pragma 'for' chunking mechanism.
  // The following lambda takes care of appending the lower and upper bound
  // parameters when necessary.
  CodeGenBoundParameters(CGF, S, CapturedVars);
  CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
  CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
                                              CapturedVars, IfCond, NumThreads);
}

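// Returns true if the variable is allocated with a non-default OpenMP
// allocator and therefore needs the runtime alloc/free path, e.g.
//   int buf[64];
//   #pragma omp allocate(buf) allocator(omp_low_lat_mem_alloc)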
static bool isAllocatableDecl(const VarDecl *VD) {
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!CVD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  // Use the default allocation.
  return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
            AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
           !AA->getAllocator());
}

static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}

Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
    CodeGenFunction &CGF, const VarDecl *VD) {
  CodeGenModule &CGM = CGF.CGM;
  auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  if (!VD)
    return Address::invalid();
  const VarDecl *CVD = VD->getCanonicalDecl();
  if (!isAllocatableDecl(CVD))
    return Address::invalid();
  llvm::Value *Size;
  CharUnits Align = CGM.getContext().getDeclAlign(CVD);
  if (CVD->getType()->isVariablyModifiedType()) {
    Size = CGF.getTypeSize(CVD->getType());
    // Align the size: ((size + align - 1) / align) * align
    Size = CGF.Builder.CreateNUWAdd(
        Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
    Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
    Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
  } else {
    CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
    Size = CGM.getSize(Sz.alignTo(Align));
  }

  const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
  assert(AA->getAllocator() &&
         "Expected allocator expression for non-default allocator.");
  llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
  // According to the standard, the original allocator type is an enum
  // (integer). Convert it to pointer type, if required.
  if (Allocator->getType()->isIntegerTy())
    Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
  else if (Allocator->getType()->isPointerTy())
    Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
                                                                CGM.VoidPtrTy);

  llvm::Value *Addr = OMPBuilder.createOMPAlloc(
      CGF.Builder, Size, Allocator,
      getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
  llvm::CallInst *FreeCI =
      OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);

  CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Addr,
      CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
      getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
  return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
}

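// Rewrites accesses to a 'threadprivate' variable through a runtime-managed
// per-thread cache when TLS cannot be used, e.g. for
//   static int counter;
//   #pragma omp threadprivate(counter)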
Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
    CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
    SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return VDAddr;

  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

  llvm::Type *VarTy = VDAddr.getElementType();
  llvm::Value *Data =
      CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy);
  llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
  std::string Suffix = getNameWithSeparators({"cache", ""});
  llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);

  llvm::CallInst *ThreadPrivateCacheCall =
      OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);

  return Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
}

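// Joins name fragments, emitting the first separator before the first
// fragment; e.g. getNameWithSeparators({"a", "b"}, ".", "_") yields ".a_b".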
std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
    ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
  SmallString<128> Buffer;
  llvm::raw_svector_ostream OS(Buffer);
  StringRef Sep = FirstSeparator;
  for (StringRef Part : Parts) {
    OS << Sep << Part;
    Sep = Separator;
  }
  return OS.str().str();
}

void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
    CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
    InsertPointTy CodeGenIP, Twine RegionName) {
  CGBuilderTy &Builder = CGF.Builder;
  Builder.restoreIP(CodeGenIP);
  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
                                               "." + RegionName + ".after");

  {
    OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
    CGF.EmitStmt(RegionBodyStmt);
  }

  if (Builder.saveIP().isSet())
    Builder.CreateBr(FiniBB);
}

void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
    CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
    InsertPointTy CodeGenIP, Twine RegionName) {
  CGBuilderTy &Builder = CGF.Builder;
  Builder.restoreIP(CodeGenIP);
  llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
                                               "." + RegionName + ".after");

  {
    OMPBuilderCBHelpers::OutlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
    CGF.EmitStmt(RegionBodyStmt);
  }

  if (Builder.saveIP().isSet())
    Builder.CreateBr(FiniBB);
}

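// Example: '#pragma omp parallel if(n > 1) num_threads(4) proc_bind(close)'
// is lowered either through the OpenMPIRBuilder (when the IR-builder codegen
// mode is enabled, e.g. via the cc1 flag -fopenmp-enable-irbuilder) or through
// the classic CGOpenMPRuntime path below.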
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    // Check if we have any if clause associated with the directive.
    llvm::Value *IfCond = nullptr;
    if (const auto *C = S.getSingleClause<OMPIfClause>())
      IfCond = EmitScalarExpr(C->getCondition(),
                              /*IgnoreResultAssign=*/true);

    llvm::Value *NumThreads = nullptr;
    if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
      NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
                                  /*IgnoreResultAssign=*/true);

    ProcBindKind ProcBind = OMP_PROC_BIND_default;
    if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
      ProcBind = ProcBindClause->getProcBindKind();

    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    // The cleanup callback that finalizes all variables at the given location
    // and thus calls destructors etc.
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    // Privatization callback that performs appropriate action for
    // shared/private/firstprivate/lastprivate/copyin/... variables.
    //
    // TODO: This defaults to shared right now.
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                     llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
      // The next line is appropriate only for variables (Val) with the
      // data-sharing attribute "shared".
      ReplVal = &Val;

      return CodeGenIP;
    };

    const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
    const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();

    auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
                               InsertPointTy CodeGenIP) {
      OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
          *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
    };

    CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
        AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
    Builder.restoreIP(
        OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
                                  IfCond, NumThreads, ProcBind, S.hasCancel()));
    return;
  }

  // Emit parallel region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit an implicit barrier to synchronize threads and avoid data races
      // when propagating the master thread's values of threadprivate
      // variables to the local instances of those variables in all other
      // implicit threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

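// For metadirectives, e.g.
//   #pragma omp metadirective when(device={kind(gpu)}: teams) default(parallel)
// Sema has already resolved the context selectors into an if-stmt that picks
// the applicable variant, so codegen only needs to emit that statement.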
void CodeGenFunction::EmitOMPMetaDirective(const OMPMetaDirective &S) {
  EmitStmt(S.getIfStmt());
}

namespace {
/// RAII to handle scopes for loop transformation directives.
class OMPTransformDirectiveScopeRAII {
  OMPLoopScope *Scope = nullptr;
  CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
  CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;

public:
  OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
    if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
      Scope = new OMPLoopScope(CGF, *Dir);
      CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
      CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
    }
  }
  ~OMPTransformDirectiveScopeRAII() {
    if (!Scope)
      return;
    delete CapInfoRAII;
    delete CGSI;
    delete Scope;
  }
};
} // namespace

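// Recursively emits the body of a collapsed loop nest: descends through
// compound statements and the chain of MaxLevel associated loops (whose
// control expressions are already folded into the directive's precomputed
// iteration space) and emits only the innermost payload statements.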
static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
                     int MaxLevel, int Level = 0) {
  assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
  const Stmt *SimplifiedS = S->IgnoreContainers();
  if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
    PrettyStackTraceLoc CrashInfo(
        CGF.getContext().getSourceManager(), CS->getLBracLoc(),
        "LLVM IR generation of compound statement ('{}')");

    // Keep track of the current cleanup stack depth, including debug scopes.
    CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
    for (const Stmt *CurStmt : CS->body())
      emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
    return;
  }
  if (SimplifiedS == NextLoop) {
    if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS))
      SimplifiedS = Dir->getTransformedStmt();
    if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
      SimplifiedS = CanonLoop->getLoopStmt();
    if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
      S = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(SimplifiedS) &&
             "Expected canonical for loop or range-based for loop.");
      const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
      CGF.EmitStmt(CXXFor->getLoopVarStmt());
      S = CXXFor->getBody();
    }
    if (Level + 1 < MaxLevel) {
      NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
          S, /*TryImperfectlyNestedLoops=*/true);
      emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
      return;
    }
  }
  CGF.EmitStmt(S);
}

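// Example of a loop body with an inscan reduction:
//   #pragma omp simd reduction(inscan, + : sum)
//   for (int i = 0; i < n; ++i) {
//     sum += a[i];
//     #pragma omp scan inclusive(sum)
//     b[i] = sum;
//   }
// The dispatch blocks created below let the 'scan' directive emit the input
// phase and the scan phase in the order required by inclusive/exclusive.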
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
                                      JumpDest LoopExit) {
  RunCleanupsScope BodyScope(*this);
  // Update counter values on the current iteration.
  for (const Expr *UE : D.updates())
    EmitIgnoredExpr(UE);
  // Update the linear variables.
  // In distribute directives only loop counters may be marked as linear, no
  // need to generate the code for them.
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
      for (const Expr *UE : C->updates())
        EmitIgnoredExpr(UE);
    }
  }

  // On a continue in the body, jump to the end.
  JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
  for (const Expr *E : D.finals_conditions()) {
    if (!E)
      continue;
    // Check that the loop counter in a non-rectangular nest fits into the
    // iteration space.
    llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
    EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
                         getProfileCount(D.getBody()));
    EmitBlock(NextBB);
  }

  OMPPrivateScope InscanScope(*this);
  EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
  bool IsInscanRegion = InscanScope.Privatize();
  if (IsInscanRegion) {
    // Need to remember the blocks before and after the scan directive
    // to dispatch them correctly depending on the clause used in
    // this directive, inclusive or exclusive. For the inclusive scan the
    // natural order of the blocks is used; for the exclusive clause the
    // blocks must be executed in reverse order.
    OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
    OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
    // No need to allocate the inscan exit block; in simd mode it is selected
    // in the codegen for the scan directive.
    if (D.getDirectiveKind() != OMPD_simd && !getLangOpts().OpenMPSimd)
      OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
    OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
    EmitBranch(OMPScanDispatch);
    EmitBlock(OMPBeforeScanBlock);
  }

  // Emit loop variables for C++ range loops.
  const Stmt *Body =
      D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
  // Emit loop body.
  emitBody(*this, Body,
           OMPLoopBasedDirective::tryToFindNextInnerLoop(
               Body, /*TryImperfectlyNestedLoops=*/true),
           D.getLoopsNumber());

  // Jump to the dispatcher at the end of the loop body.
  if (IsInscanRegion)
    EmitBranch(OMPScanExitBlock);

  // The end (updates/cleanups).
  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
}

using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;

/// Emit a captured statement and return the function as well as its captured
/// closure context.
static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
                                             const CapturedStmt *S) {
  LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
  CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
  std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
      std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);

  return {F, CapStruct.getPointer(ParentCGF)};
}

/// Emit a call to a previously captured closure.
static llvm::CallInst *
emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
                     llvm::ArrayRef<llvm::Value *> Args) {
  // Append the closure context to the argument list.
  SmallVector<llvm::Value *> EffectiveArgs;
  EffectiveArgs.reserve(Args.size() + 1);
  llvm::append_range(EffectiveArgs, Args);
  EffectiveArgs.push_back(Cap.second);

  return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
}

llvm::CanonicalLoopInfo *
CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
  assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");

  // The caller is processing the loop-associated directive containing the \p
  // Depth loops nested in \p S. Put the previous pending loop-associated
  // directive on the stack. If the current loop-associated directive is a loop
  // transformation directive, it will push its generated loops onto the stack
  // such that together with the loops left here they form the combined loop
  // nest for the parent loop-associated directive.
  int ParentExpectedOMPLoopDepth = ExpectedOMPLoopDepth;
  ExpectedOMPLoopDepth = Depth;

  EmitStmt(S);
  assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");

  // The last added loop is the outermost one.
  llvm::CanonicalLoopInfo *Result = OMPLoopNestStack.back();

  // Pop the \p Depth loops requested by the call from that stack and restore
  // the previous context.
  OMPLoopNestStack.pop_back_n(Depth);
  ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;

  return Result;
}

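// Lowers an OMPCanonicalLoop wrapper via the OpenMPIRBuilder: the
// Sema-provided Distance closure computes the trip count, and the LoopVar
// closure maps a logical iteration number back to the user's loop variable,
// e.g. for 'for (int i = 7; i < n; i += 3)' logical iteration k yields
// i = 7 + 3 * k.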
void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
  const Stmt *SyntacticalLoop = S->getLoopStmt();
  if (!getLangOpts().OpenMPIRBuilder) {
    // Ignore if OpenMPIRBuilder is not enabled.
    EmitStmt(SyntacticalLoop);
    return;
  }

  LexicalScope ForScope(*this, S->getSourceRange());

  // Emit init statements. The Distance/LoopVar funcs may reference variable
  // declarations they contain.
  const Stmt *BodyStmt;
  if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
    if (const Stmt *InitStmt = For->getInit())
      EmitStmt(InitStmt);
    BodyStmt = For->getBody();
  } else if (const auto *RangeFor =
                 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
    if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
      EmitStmt(RangeStmt);
    if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
      EmitStmt(BeginStmt);
    if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
      EmitStmt(EndStmt);
    if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
      EmitStmt(LoopVarStmt);
    BodyStmt = RangeFor->getBody();
  } else
    llvm_unreachable("Expected for-stmt or range-based for-stmt");

  // Emit closure for later use. By-value captures will be captured here.
  const CapturedStmt *DistanceFunc = S->getDistanceFunc();
  EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
  const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
  EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);

  // Call the distance function to get the number of iterations of the loop to
  // come.
  QualType LogicalTy = DistanceFunc->getCapturedDecl()
                           ->getParam(0)
                           ->getType()
                           .getNonReferenceType();
  Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
  emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
  llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");

  // Emit the loop structure.
  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
                           llvm::Value *IndVar) {
    Builder.restoreIP(CodeGenIP);

    // Emit the loop body: Convert the logical iteration number to the loop
    // variable and emit the body.
    const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
    LValue LCVal = EmitLValue(LoopVarRef);
    Address LoopVarAddress = LCVal.getAddress(*this);
    emitCapturedStmtCall(*this, LoopVarClosure,
                         {LoopVarAddress.getPointer(), IndVar});

    RunCleanupsScope BodyScope(*this);
    EmitStmt(BodyStmt);
  };
  llvm::CanonicalLoopInfo *CL =
      OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);

  // Finish up the loop.
  Builder.restoreIP(CL->getAfterIP());
  ForScope.ForceCleanup();

  // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
  OMPLoopNestStack.push_back(CL);
}

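// Emits the generic inner loop skeleton:
//   omp.inner.for.cond -> omp.inner.for.body -> omp.inner.for.inc
// with a back edge from the increment block to the condition block and the
// exit edge going to omp.inner.for.end.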
void CodeGenFunction::EmitOMPInnerLoop(
    const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
    const Expr *IncExpr,
    const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
    const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
  auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");

  // Start the loop with a block that tests the condition.
  auto CondBlock = createBasicBlock("omp.inner.for.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();

  // If attributes are attached, push to the basic block with them.
  const auto &OMPED = cast<OMPExecutableDirective>(S);
  const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
  const Stmt *SS = ICS->getCapturedStmt();
  const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
  OMPLoopNestStack.clear();
  if (AS)
    LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
                   AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
                   SourceLocToDebugLoc(R.getEnd()));
  else
    LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                   SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (RequiresCleanup)
    ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");

  // Emit condition.
  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(LoopBody);
  incrementProfileCounter(&S);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  BodyGen(*this);

  // Emit "IV = IV + 1" and a back-edge to the condition block.
  EmitBlock(Continue.getBlock());
  EmitIgnoredExpr(IncExpr);
  PostIncGen(*this);
  BreakContinueStack.pop_back();
  EmitBranch(CondBlock);
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());
}

bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // Emit inits for the linear variables.
  bool HasLinears = false;
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    for (const Expr *Init : C->inits()) {
      HasLinears = true;
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
      if (const auto *Ref =
              dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
        AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
        const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        VD->getInit()->getType(), VK_LValue,
                        VD->getInit()->getExprLoc());
        EmitExprAsInit(
            &DRE, VD,
            MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
            /*capturedByInit=*/false);
        EmitAutoVarCleanups(Emission);
      } else {
        EmitVarDecl(*VD);
      }
    }
    // Emit the linear steps for the linear clauses.
    // If a step is not constant, it is pre-calculated before the loop.
    if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
      if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
        EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
        // Emit calculation of the linear step.
        EmitIgnoredExpr(CS);
      }
  }
  return HasLinears;
}

void CodeGenFunction::EmitOMPLinearClauseFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  // Emit the final values of the linear variables.
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto IC = C->varlist_begin();
    for (const Expr *F : C->finals()) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // When the first post-update expression is found, emit the
          // conditional block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
          DoneBB = createBasicBlock(".omp.linear.pu.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                      CapturedStmtInfo->lookup(OrigVD) != nullptr,
                      (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
      Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
      CodeGenFunction::OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, OrigAddr);
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
      ++IC;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

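// Example: '#pragma omp simd aligned(p : 64)' emits an alignment assumption
// on 'p' with the given alignment, or with the target's default simd
// alignment when no alignment argument is specified.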
static void emitAlignedClause(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
    llvm::APInt ClauseAlignment(64, 0);
    if (const Expr *AlignmentExpr = Clause->getAlignment()) {
      auto *AlignmentCI =
          cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
      ClauseAlignment = AlignmentCI->getValue();
    }
    for (const Expr *E : Clause->varlists()) {
      llvm::APInt Alignment(ClauseAlignment);
      if (Alignment == 0) {
        // OpenMP [2.8.1, Description]
        // If no optional parameter is specified, implementation-defined
        // default alignments for SIMD instructions on the target platforms
        // are assumed.
        Alignment =
            CGF.getContext()
                .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                    E->getType()->getPointeeType()))
                .getQuantity();
      }
      assert((Alignment == 0 || Alignment.isPowerOf2()) &&
             "alignment is not power of 2");
      if (Alignment != 0) {
        llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
        CGF.emitAlignmentAssumption(
            PtrValue, E, /*No second loc needed*/ SourceLocation(),
            llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
      }
    }
  }
}

void CodeGenFunction::EmitOMPPrivateLoopCounters(
    const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
  if (!HaveInsertPoint())
    return;
  auto I = S.private_counters().begin();
  for (const Expr *E : S.counters()) {
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
    // Emit var without initialization.
    AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
    EmitAutoVarCleanups(VarEmission);
    LocalDeclMap.erase(PrivateVD);
    (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
    if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
        VD->hasGlobalStorage()) {
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
                      LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                      E->getType(), VK_LValue, E->getExprLoc());
      (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
    } else {
      (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
    }
    ++I;
  }
  // Privatize extra loop counters used in loops for ordered(n) clauses.
  for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
    if (!C->getNumForLoops())
      continue;
    for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
         I < E; ++I) {
      const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
      const auto *VD = cast<VarDecl>(DRE->getDecl());
      // Override only those variables that can be captured to avoid
      // re-emission of the variables declared within the loops.
      if (DRE->refersToEnclosingVariableOrCapture()) {
        (void)LoopScope.addPrivate(
            VD, CreateMemTemp(DRE->getType(), VD->getName()));
      }
    }
  }
}

static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
                        const Expr *Cond, llvm::BasicBlock *TrueBlock,
                        llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
  if (!CGF.HaveInsertPoint())
    return;
  {
    CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
    (void)PreCondScope.Privatize();
    // Get initial values of real counters.
    for (const Expr *I : S.inits()) {
      CGF.EmitIgnoredExpr(I);
    }
  }
  // Create temp loop control variables with their init values to support
  // non-rectangular loops.
  CodeGenFunction::OMPMapVars PreCondVars;
  for (const Expr *E : S.dependent_counters()) {
    if (!E)
      continue;
    assert(!E->getType().getNonReferenceType()->isRecordType() &&
           "dependent counter must not be an iterator.");
    const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Address CounterAddr =
        CGF.CreateMemTemp(VD->getType().getNonReferenceType());
    (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
  }
  (void)PreCondVars.apply(CGF);
  for (const Expr *E : S.dependent_inits()) {
    if (!E)
      continue;
    CGF.EmitIgnoredExpr(E);
  }
  // Check that the loop is executed at least once.
  CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
  PreCondVars.restore(CGF);
}

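// Example: '#pragma omp simd linear(x : 2)' emits a private copy of 'x' here;
// loop counters of a simd directive are not re-registered in the private
// scope since they are already privatized by EmitOMPPrivateLoopCounters.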
void CodeGenFunction::EmitOMPLinearClause(
    const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(D.getDirectiveKind())) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
    auto CurPrivate = C->privates().begin();
    for (const Expr *E : C->varlists()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
      if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
        // Emit private VarDecl with copy init.
        EmitVarDecl(*PrivateVD);
        bool IsRegistered =
            PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD));
        assert(IsRegistered && "linear var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      } else {
        EmitVarDecl(*PrivateVD);
      }
      ++CurPrivate;
    }
  }
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
    auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
    CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
    // In the presence of a finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(/*Enable=*/false);
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
  // Walk clauses and process safelen/simdlen and related loop hints.
  LoopStack.setParallel(/*Enable=*/true);
  LoopStack.setVectorizeEnable();
  emitSimdlenSafelenClause(*this, D);
  if (const auto *C = D.getSingleClause<OMPOrderClause>())
    if (C->getKind() == OMPC_ORDER_concurrent)
      LoopStack.setParallel(/*Enable=*/true);
  if ((D.getDirectiveKind() == OMPD_simd ||
       (getLangOpts().OpenMPSimd &&
        isOpenMPSimdDirective(D.getDirectiveKind()))) &&
      llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
                   [](const OMPReductionClause *C) {
                     return C->getModifier() == OMPC_REDUCTION_inscan;
                   }))
    // Disable parallel access in case of prefix sum.
    LoopStack.setParallel(/*Enable=*/false);
}

void CodeGenFunction::EmitOMPSimdFinal(
    const OMPLoopDirective &D,
    const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
  if (!HaveInsertPoint())
    return;
  llvm::BasicBlock *DoneBB = nullptr;
  auto IC = D.counters().begin();
  auto IPC = D.private_counters().begin();
  for (const Expr *F : D.finals()) {
    const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
    const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
    const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
    if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
        OrigVD->hasGlobalStorage() || CED) {
      if (!DoneBB) {
        if (llvm::Value *Cond = CondGen(*this)) {
          // When the first post-update expression is found, emit the
          // conditional block if it was requested.
          llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
          DoneBB = createBasicBlock(".omp.final.done");
          Builder.CreateCondBr(Cond, ThenBB, DoneBB);
          EmitBlock(ThenBB);
        }
      }
      Address OrigAddr = Address::invalid();
      if (CED) {
        OrigAddr =
            EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
      } else {
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
                        /*RefersToEnclosingVariableOrCapture=*/false,
                        (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
        OrigAddr = EmitLValue(&DRE).getAddress(*this);
      }
      OMPPrivateScope VarScope(*this);
      VarScope.addPrivate(OrigVD, OrigAddr);
      (void)VarScope.Privatize();
      EmitIgnoredExpr(F);
    }
    ++IC;
    ++IPC;
  }
  if (DoneBB)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}

static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
                                         const OMPLoopDirective &S,
                                         CodeGenFunction::JumpDest LoopExit) {
  CGF.EmitOMPLoopBody(S, LoopExit);
  CGF.EmitStopPoint(&S);
}

/// Emit a helper variable and return the corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  auto VDecl = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VDecl);
  return CGF.EmitLValue(Helper);
}

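// Emits the simd loop body in up to two versions when an if(simd:) clause is
// present, e.g. for
//   #pragma omp simd if(simd : n > 16)
// one version with full simd semantics and one with vectorization disabled,
// selected at run time by the condition.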
static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
                               const RegionCodeGenTy &SimdInitGen,
                               const RegionCodeGenTy &BodyCodeGen) {
  auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    SimdInitGen(CGF);

    BodyCodeGen(CGF);
  };
  auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);

    BodyCodeGen(CGF);
  };
  const Expr *IfCond = nullptr;
  if (isOpenMPSimdDirective(S.getDirectiveKind())) {
    for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
      if (CGF.getLangOpts().OpenMP >= 50 &&
          (C->getNameModifier() == OMPD_unknown ||
           C->getNameModifier() == OMPD_simd)) {
        IfCond = C->getCondition();
        break;
      }
    }
  }
  if (IfCond) {
    CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
  } else {
    RegionCodeGenTy ThenRCG(ThenGen);
    ThenRCG(CGF);
  }
}

static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
                              PrePostActionTy &Action) {
  Action.Enter(CGF);
  assert(isOpenMPSimdDirective(S.getDirectiveKind()) &&
         "Expected simd directive");
  OMPLoopScope PreInitScope(CGF, S);
  // if (PreCond) {
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  // }
  //
  if (isOpenMPDistributeDirective(S.getDirectiveKind()) ||
      isOpenMPWorksharingDirective(S.getDirectiveKind()) ||
      isOpenMPTaskLoopDirective(S.getDirectiveKind())) {
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
    (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
  }

  // Emit: if (PreCond) - begin.
  // If the condition constant folds and can be elided, avoid emitting the
  // whole loop.
  bool CondConstant;
  llvm::BasicBlock *ContBlock = nullptr;
  if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
    if (!CondConstant)
      return;
  } else {
    llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
    ContBlock = CGF.createBasicBlock("simd.if.end");
    emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
                CGF.getProfileCount(&S));
    CGF.EmitBlock(ThenBlock);
    CGF.incrementProfileCounter(&S);
  }

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  CGF.EmitVarDecl(*IVDecl);
  CGF.EmitIgnoredExpr(S.getInit());

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    CGF.EmitIgnoredExpr(S.getCalcLastIteration());
  }

  emitAlignedClause(CGF, S);
  (void)CGF.EmitOMPLinearClauseInit(S);
  {
    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
    CGF.EmitOMPLinearClause(S, LoopScope);
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
        CGF, S, CGF.EmitLValue(S.getIterationVariable()));
    bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    emitCommonSimdLoop(
        CGF, S,
        [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPSimdInit(S);
        },
        [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitOMPInnerLoop(
              S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
              [&S](CodeGenFunction &CGF) {
                emitOMPLoopBodyWithStopPoint(CGF, S,
                                             CodeGenFunction::JumpDest());
              },
              [](CodeGenFunction &) {});
        });
    CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
    // Emit final copy of the lastprivate variables at the end of loops.
    if (HasLastprivateClause)
      CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
    emitPostUpdateForReductionClause(CGF, S,
                                     [](CodeGenFunction &) { return nullptr; });
    LoopScope.restoreMap();
    CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
  }
  // Emit: if (PreCond) - end.
  if (ContBlock) {
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
}

static bool isSupportedByOpenMPIRBuilder(const OMPSimdDirective &S) {
  // Check for unsupported clauses; currently only the simdlen clause is
  // supported.
  for (OMPClause *C : S.clauses()) {
    if (!isa<OMPSimdlenClause>(C))
      return false;
  }

  // Check if we have a statement with the ordered directive.
  // Visit the statement hierarchy to find a compound statement
  // with an ordered directive in it.
  if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
    if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
      for (const Stmt *SubStmt : SyntacticalLoop->children()) {
        if (!SubStmt)
          continue;
        if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
          for (const Stmt *CSSubStmt : CS->children()) {
            if (!CSSubStmt)
              continue;
            if (isa<OMPOrderedDirective>(CSSubStmt)) {
              return false;
            }
          }
        }
      }
    }
  }
  return true;
}

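// Example: '#pragma omp simd simdlen(8)' can take the OpenMPIRBuilder path
// below; any other clause currently forces the classic codegen path via
// emitOMPSimdRegion.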
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  bool UseOMPIRBuilder =
      CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
  if (UseOMPIRBuilder) {
    auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
                                                          PrePostActionTy &) {
      // Use the OpenMPIRBuilder if enabled.
      if (UseOMPIRBuilder) {
        // Emit the associated statement and get its loop representation.
        const Stmt *Inner = S.getRawStmt();
        llvm::CanonicalLoopInfo *CLI =
            EmitOMPCollapsedCanonicalLoopNest(Inner, 1);

        llvm::OpenMPIRBuilder &OMPBuilder =
            CGM.getOpenMPRuntime().getOMPBuilder();
        // Add SIMD specific metadata.
        llvm::ConstantInt *Simdlen = nullptr;
        if (const auto *C = S.getSingleClause<OMPSimdlenClause>()) {
          RValue Len =
              this->EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
                                /*ignoreResult=*/true);
          auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
          Simdlen = Val;
        }
        OMPBuilder.applySimd(CLI, Simdlen);
        return;
      }
    };
    {
      auto LPCRegion =
          CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
      OMPLexicalScope Scope(*this, S, OMPD_unknown);
      CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd,
                                                  CodeGenIRBuilder);
    }
    return;
  }

  ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
  OMPFirstScanLoop = true;
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
  // Emit the de-sugared statement.
  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
  EmitStmt(S.getTransformedStmt());
}

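// Example: '#pragma omp unroll partial(4)' is lowered either through the
// OpenMPIRBuilder's unrollLoopPartial or, on the classic path, by attaching
// 'llvm.loop' unroll metadata to the following loop.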
EmitOMPUnrollDirective(const OMPUnrollDirective & S)2685 void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) {
2686 bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder;
2687
2688 if (UseOMPIRBuilder) {
2689 auto DL = SourceLocToDebugLoc(S.getBeginLoc());
2690 const Stmt *Inner = S.getRawStmt();
2691
2692 // Consume nested loop. Clear the entire remaining loop stack because a
2693 // fully unrolled loop is non-transformable. For partial unrolling the
2694 // generated outer loop is pushed back to the stack.
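    // For example, '#pragma omp unroll partial(4)' unrolls the loop by a
    // factor of 4 and leaves a transformable outer loop behind, whereas
    // '#pragma omp unroll full' consumes the loop entirely.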
    llvm::CanonicalLoopInfo *CLI = EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
    OMPLoopNestStack.clear();

    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();

    bool NeedsUnrolledCLI = ExpectedOMPLoopDepth >= 1;
    llvm::CanonicalLoopInfo *UnrolledCLI = nullptr;

    if (S.hasClausesOfKind<OMPFullClause>()) {
      assert(ExpectedOMPLoopDepth == 0);
      OMPBuilder.unrollLoopFull(DL, CLI);
    } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
      uint64_t Factor = 0;
      if (Expr *FactorExpr = PartialClause->getFactor()) {
        Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
        assert(Factor >= 1 && "Only positive factors are valid");
      }
      OMPBuilder.unrollLoopPartial(DL, CLI, Factor,
                                   NeedsUnrolledCLI ? &UnrolledCLI : nullptr);
    } else {
      OMPBuilder.unrollLoopHeuristic(DL, CLI);
    }

    assert((!NeedsUnrolledCLI || UnrolledCLI) &&
           "NeedsUnrolledCLI implies UnrolledCLI to be set");
    if (UnrolledCLI)
      OMPLoopNestStack.push_back(UnrolledCLI);

    return;
  }

  // This function is only called if the unrolled loop is not consumed by any
  // other loop-associated construct. Such a loop-associated construct will
  // have used the transformed AST.

  // Set the unroll metadata for the next emitted loop.
  LoopStack.setUnrollState(LoopAttributes::Enable);

  if (S.hasClausesOfKind<OMPFullClause>()) {
    LoopStack.setUnrollState(LoopAttributes::Full);
  } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
    if (Expr *FactorExpr = PartialClause->getFactor()) {
      uint64_t Factor =
          FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
      assert(Factor >= 1 && "Only positive factors are valid");
      LoopStack.setUnrollCount(Factor);
    }
  }

  EmitStmt(S.getAssociatedStmt());
}

void CodeGenFunction::EmitOMPOuterLoop(
    bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
    CodeGenFunction::OMPPrivateScope &LoopScope,
    const CodeGenFunction::OMPLoopArguments &LoopArgs,
    const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
    const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
  EmitBlock(CondBlock);
  const SourceRange R = S.getSourceRange();
  OMPLoopNestStack.clear();
  LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  llvm::Value *BoolCondVal = nullptr;
  if (!DynamicOrOrdered) {
    // UB = min(UB, GlobalUB) or
    // UB = min(UB, PrevUB) for combined loop-sharing constructs (e.g.
    // 'distribute parallel for').
    EmitIgnoredExpr(LoopArgs.EUB);
    // IV = LB
    EmitIgnoredExpr(LoopArgs.Init);
    // IV < UB
    BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
  } else {
    BoolCondVal =
        RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
                       LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
  }

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (LoopScope.requiresCleanups())
    ExitBlock = createBasicBlock("omp.dispatch.cleanup");

  llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }
  EmitBlock(LoopBody);

  // Emit "IV = LB" (in case of static schedule, we have already calculated new
  // LB for loop condition and emitted it above).
  if (DynamicOrOrdered)
    EmitIgnoredExpr(LoopArgs.Init);

  // Create a block for the increment.
  JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  emitCommonSimdLoop(
      *this, S,
      [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
        // Generate !llvm.loop.parallel metadata for loads and stores for loops
        // with dynamic/guided scheduling and without an ordered clause.
        if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
          CGF.LoopStack.setParallel(!IsMonotonic);
          if (const auto *C = S.getSingleClause<OMPOrderClause>())
            if (C->getKind() == OMPC_ORDER_concurrent)
              CGF.LoopStack.setParallel(/*Enable=*/true);
        } else {
          CGF.EmitOMPSimdInit(S);
        }
      },
      [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
       &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
        SourceLocation Loc = S.getBeginLoc();
        // When 'distribute' is not combined with a 'for':
        //   while (idx <= UB) { BODY; ++idx; }
        // When 'distribute' is combined with a 'for'
        // (e.g. 'distribute parallel for'):
        //   while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
        CGF.EmitOMPInnerLoop(
            S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
            [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
              CodeGenLoop(CGF, S, LoopExit);
            },
            [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
              CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
            });
      });

  EmitBlock(Continue.getBlock());
  BreakContinueStack.pop_back();
  if (!DynamicOrOrdered) {
    // Emit "LB = LB + Stride", "UB = UB + Stride".
    EmitIgnoredExpr(LoopArgs.NextLB);
    EmitIgnoredExpr(LoopArgs.NextUB);
  }

  EmitBranch(CondBlock);
  OMPLoopNestStack.clear();
  LoopStack.pop();
  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock());

  // Tell the runtime we are done.
  auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
    if (!DynamicOrOrdered)
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
  };
  OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(
    const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    const OMPLoopArguments &LoopArgs,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
  const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);

  assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
                                            LoopArgs.Chunk != nullptr)) &&
         "static non-chunked schedule does not need outer loop");

  // Emit outer loop.
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(dynamic,chunk_size) is specified, the iterations are
  // distributed to threads in the team in chunks as the threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be distributed. Each chunk contains chunk_size
  // iterations, except for the last chunk to be distributed, which may have
  // fewer iterations. When no chunk_size is specified, it defaults to 1.
  //
  // When schedule(guided,chunk_size) is specified, the iterations are assigned
  // to threads in the team in chunks as the executing threads request them.
  // Each thread executes a chunk of iterations, then requests another chunk,
  // until no chunks remain to be assigned. For a chunk_size of 1, the size of
  // each chunk is proportional to the number of unassigned iterations divided
  // by the number of threads in the team, decreasing to 1. For a chunk_size
  // with value k (greater than 1), the size of each chunk is determined in the
  // same way, with the restriction that the chunks do not contain fewer than k
  // iterations (except for the last chunk to be assigned, which may have fewer
  // than k iterations).
  //
  // When schedule(auto) is specified, the decision regarding scheduling is
  // delegated to the compiler and/or runtime system. The programmer gives the
  // implementation the freedom to choose any possible mapping of iterations to
  // threads in the team.
  //
  // When schedule(runtime) is specified, the decision regarding scheduling is
  // deferred until run time, and the schedule and chunk size are taken from
  // the run-sched-var ICV. If the ICV is set to auto, the schedule is
  // implementation defined.
  //
  // while(__kmpc_dispatch_next(&LB, &UB)) {
  //   idx = LB;
  //   while (idx <= UB) {
  //     BODY;
  //     ++idx;
  //     __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
  //   } // inner loop
  // }
  //
  // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
  // When schedule(static, chunk_size) is specified, iterations are divided
  // into chunks of size chunk_size, and the chunks are assigned to the threads
  // in the team in a round-robin fashion in the order of the thread number.
  //
  // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
  //   while (idx <= UB) { BODY; ++idx; } // inner loop
  //   LB = LB + ST;
  //   UB = UB + ST;
  // }
  //
  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  if (DynamicOrOrdered) {
    const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
        CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
    llvm::Value *LBVal = DispatchBounds.first;
    llvm::Value *UBVal = DispatchBounds.second;
    CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
                                                              LoopArgs.Chunk};
    RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
                           IVSigned, Ordered, DispatchRTInputValues);
  } else {
    CGOpenMPRuntime::StaticRTInput StaticInit(
        IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
        LoopArgs.ST, LoopArgs.Chunk);
    RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
                         ScheduleKind, StaticInit);
  }

  auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
                                    const unsigned IVSize,
                                    const bool IVSigned) {
    if (Ordered) {
      CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
                                                            IVSigned);
    }
  };

  OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
                                 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
  OuterLoopArgs.IncExpr = S.getInc();
  OuterLoopArgs.Init = S.getInit();
  OuterLoopArgs.Cond = S.getCond();
  OuterLoopArgs.NextLB = S.getNextLowerBound();
  OuterLoopArgs.NextUB = S.getNextUpperBound();
  EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
                   emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}

static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}

void CodeGenFunction::EmitOMPDistributeOuterLoop(
    OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
    OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
    const CodeGenLoopTy &CodeGenLoopContent) {

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  // Emit outer loop.
  // Same behavior as an OMPForOuterLoop, except that the schedule cannot be
  // dynamic.

  const Expr *IVExpr = S.getIterationVariable();
  const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
  const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

  CGOpenMPRuntime::StaticRTInput StaticInit(
      IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
      LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
  RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);

  // For combined 'distribute' and 'for' the increment expression of distribute
  // is stored in DistInc. For 'distribute' alone, it is in Inc.
  Expr *IncExpr;
  if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
    IncExpr = S.getDistInc();
  else
    IncExpr = S.getInc();

  // This routine is shared by 'omp distribute parallel for' and
  // 'omp distribute': select the right EUB expression depending on the
  // directive.
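  // (For combined directives the 'Combined*' expressions operate on the chunk
  // previously computed by 'distribute'; for plain 'distribute' the regular
  // expressions cover the whole iteration space.)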
  OMPLoopArguments OuterLoopArgs;
  OuterLoopArgs.LB = LoopArgs.LB;
  OuterLoopArgs.UB = LoopArgs.UB;
  OuterLoopArgs.ST = LoopArgs.ST;
  OuterLoopArgs.IL = LoopArgs.IL;
  OuterLoopArgs.Chunk = LoopArgs.Chunk;
  OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedEnsureUpperBound()
                          : S.getEnsureUpperBound();
  OuterLoopArgs.IncExpr = IncExpr;
  OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedInit()
                           : S.getInit();
  OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                           ? S.getCombinedCond()
                           : S.getCond();
  OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextLowerBound()
                             : S.getNextLowerBound();
  OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                             ? S.getCombinedNextUpperBound()
                             : S.getNextUpperBound();

  EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
                   S, LoopScope, OuterLoopArgs, CodeGenLoopContent,
                   emitEmptyOrdered);
}

static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));

  // When composing 'distribute' with 'for' (e.g. as in 'distribute
  // parallel for') we need to use the 'distribute'
  // chunk lower and upper bounds rather than the whole loop iteration
  // space. These are parameters to the outlined function for 'parallel'
  // and we copy the bounds of the previous schedule into the current ones.
  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
      PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
  PrevLBVal = CGF.EmitScalarConversion(
      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevLowerBoundVariable()->getExprLoc());
  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
      PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
  PrevUBVal = CGF.EmitScalarConversion(
      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
      LS.getIterationVariable()->getType(),
      LS.getPrevUpperBoundVariable()->getExprLoc());

  CGF.EmitStoreOfScalar(PrevLBVal, LB);
  CGF.EmitStoreOfScalar(PrevUBVal, UB);

  return {LB, UB};
}

/// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
/// we need to use the LB and UB expressions generated by the worksharing
/// code generation support, whereas in non-combined situations we would
/// just emit 0 and the LastIteration expression.
/// This function is necessary due to the difference in the LB and UB
/// types for the RT emission routines for 'for_static_init' and
/// 'for_dispatch_init'.
static std::pair<llvm::Value *, llvm::Value *>
emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        Address LB, Address UB) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  // When implementing a dynamic schedule for a 'for' combined with a
  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
  // is not normalized as each team only executes its own assigned
  // distribute chunk.
  QualType IteratorTy = IVExpr->getType();
  llvm::Value *LBVal =
      CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  llvm::Value *UBVal =
      CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
  return {LBVal, UBVal};
}

static void emitDistributeParallelForDistributeInnerBoundParams(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const auto &Dir = cast<OMPLoopDirective>(S);
  LValue LB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
  llvm::Value *LBCast =
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
                                CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(LBCast);
  LValue UB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));

  llvm::Value *UBCast =
      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
                                CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(UBCast);
}

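/// Emits the 'parallel for' part of a combined 'distribute parallel for'.
/// The 'distribute' chunk bounds computed by the enclosing construct are
/// passed to the outlined parallel region, so that, e.g., for
/// \code
/// #pragma omp distribute parallel for
/// for (int I = 0; I < N; ++I) ...
/// \endcode
/// each team runs its worksharing loop only over its own [PrevLB, PrevUB]
/// chunk of the iteration space.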
static void
emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
                                 const OMPLoopDirective &S,
                                 CodeGenFunction::JumpDest LoopExit) {
  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
                                         PrePostActionTy &Action) {
    Action.Enter(CGF);
    bool HasCancel = false;
    if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
      if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
      else if (const auto *D =
                   dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
        HasCancel = D->hasCancel();
    }
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
                                                     HasCancel);
    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
                               emitDistributeParallelForInnerBounds,
                               emitDistributeParallelForDispatchBounds);
  };

  emitCommonOMPParallelDirective(
      CGF, S,
      isOpenMPSimdDirective(S.getDirectiveKind()) ? OMPD_for_simd : OMPD_for,
      CGInlinedWorksharingLoop,
      emitDistributeParallelForDistributeInnerBoundParams);
}

void CodeGenFunction::EmitOMPDistributeParallelForDirective(
    const OMPDistributeParallelForDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
    const OMPDistributeParallelForSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
                              S.getDistInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_parallel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

void CodeGenFunction::EmitOMPDistributeSimdDirective(
    const OMPDistributeSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
    CodeGenModule &CGM, StringRef ParentName,
    const OMPTargetSimdDirective &S) {
  // Emit SPMD target simd region as a standalone region.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  llvm::Function *Fn;
  llvm::Constant *Addr;
  // Emit target region as a standalone region.
  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
      S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
  assert(Fn && Addr && "Target device function emission failed.");
}

void CodeGenFunction::EmitOMPTargetSimdDirective(
    const OMPTargetSimdDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    emitOMPSimdRegion(CGF, S, Action);
  };
  emitCommonOMPTargetDirective(*this, S, CodeGen);
}

namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

bool CodeGenFunction::EmitOMPWorksharingLoop(
    const OMPLoopDirective &S, Expr *EUB,
    const CodeGenLoopBoundsTy &CodeGenLoopBounds,
    const CodeGenDispatchBoundsTy &CGDispatchBounds) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iterations count
  // on each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return false;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    RunCleanupsScope DoacrossCleanupScope(*this);
    bool Ordered = false;
    if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
      if (OrderedClause->getNumForLoops())
        RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
      else
        Ordered = true;
    }

    llvm::DenseSet<const Expr *> EmittedFinals;
    emitAlignedClause(*this, S);
    bool HasLinears = EmitOMPLinearClauseInit(S);
    // Emit helper vars inits.

    std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
    LValue LB = Bounds.first;
    LValue UB = Bounds.second;
    LValue ST =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
    LValue IL =
        EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

    // Emit 'then' code.
    {
      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
          *this, S, EmitLValue(S.getIterationVariable()));
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPReductionClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      EmitOMPLinearClause(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the loop schedule kind and chunk.
      const Expr *ChunkExpr = nullptr;
      OpenMPScheduleTy ScheduleKind;
      if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
        ScheduleKind.Schedule = C->getScheduleKind();
        ScheduleKind.M1 = C->getFirstScheduleModifier();
        ScheduleKind.M2 = C->getSecondScheduleModifier();
        ChunkExpr = C->getChunkSize();
      } else {
        // Default behavior for the schedule clause.
        CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
            *this, S, ScheduleKind.Schedule, ChunkExpr);
      }
      bool HasChunkSizeOne = false;
      llvm::Value *Chunk = nullptr;
      if (ChunkExpr) {
        Chunk = EmitScalarExpr(ChunkExpr);
        Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
                                     S.getIterationVariable()->getType(),
                                     S.getBeginLoc());
        Expr::EvalResult Result;
        if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
          llvm::APSInt EvaluatedChunk = Result.Val.getInt();
          HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
        }
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
      // If the static schedule kind is specified or if the ordered clause is
      // specified, and if no monotonic modifier is specified, the effect will
      // be as if the monotonic modifier was specified.
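      // For example, 'schedule(static)' and a bare 'ordered' clause are
      // treated as monotonic here, while 'schedule(nonmonotonic: dynamic)'
      // is not.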
      bool StaticChunkedOne =
          RT.isStaticChunked(ScheduleKind.Schedule,
                             /* Chunked */ Chunk != nullptr) &&
          HasChunkSizeOne &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      bool IsMonotonic =
          Ordered ||
          (ScheduleKind.Schedule == OMPC_SCHEDULE_static &&
           !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
             ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
          ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
          ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
      if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
                                 /* Chunked */ Chunk != nullptr) ||
           StaticChunkedOne) &&
          !Ordered) {
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind())) {
                CGF.EmitOMPSimdInit(S);
              } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
                if (C->getKind() == OMPC_ORDER_concurrent)
                  CGF.LoopStack.setParallel(/*Enable=*/true);
              }
            },
            [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne,
             Chunk, &S, ScheduleKind, LoopExit,
             &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
              // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
              // When no chunk_size is specified, the iteration space is
              // divided into chunks that are approximately equal in size,
              // and at most one chunk is distributed to each thread. Note
              // that the size of the chunks is unspecified in this case.
              CGOpenMPRuntime::StaticRTInput StaticInit(
                  IVSize, IVSigned, Ordered, IL.getAddress(CGF),
                  LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
                  StaticChunkedOne ? Chunk : nullptr);
              CGF.CGM.getOpenMPRuntime().emitForStaticInit(
                  CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
                  StaticInit);
              // UB = min(UB, GlobalUB);
              if (!StaticChunkedOne)
                CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
              // IV = LB;
              CGF.EmitIgnoredExpr(S.getInit());
              // For unchunked static schedule generate:
              //
              // while (idx <= UB) {
              //   BODY;
              //   ++idx;
              // }
              //
              // For static schedule with chunk one:
              //
              // while (IV <= PrevUB) {
              //   BODY;
              //   IV += ST;
              // }
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(),
                  StaticChunkedOne ? S.getCombinedParForInDistCond()
                                   : S.getCond(),
                  StaticChunkedOne ? S.getDistInc() : S.getInc(),
                  [&S, LoopExit](CodeGenFunction &CGF) {
                    emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
                  },
                  [](CodeGenFunction &) {});
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        auto &&CodeGen = [&S](CodeGenFunction &CGF) {
          CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                         S.getDirectiveKind());
        };
        OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments(
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk, EUB);
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                            LoopArguments, CGDispatchBounds);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      EmitOMPReductionClauseFinal(
          S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
                 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
                 : /*Parallel only*/ OMPD_parallel);
      // Emit post-update of the reduction variables if IsLastIter != 0.
      emitPostUpdateForReductionClause(
          *this, S, [IL, &S](CodeGenFunction &CGF) {
            return CGF.Builder.CreateIsNotNull(
                CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
          });
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause)
        EmitOMPLastprivateClauseFinal(
            S, isOpenMPSimdDirective(S.getDirectiveKind()),
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      LoopScope.restoreMap();
      EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
        return CGF.Builder.CreateIsNotNull(
            CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
      });
    }
    DoacrossCleanupScope.ForceCleanup();
    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, /*IsFinished=*/true);
    }
  }
  return HasLastprivateClause;
}

/// The following two functions generate expressions for the loop lower
/// and upper bounds in case of static and dynamic (dispatch) schedule
/// of the associated 'for' or 'distribute' loop.
static std::pair<LValue, LValue>
emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  const auto &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
  return {LB, UB};
}

/// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
/// consider the lower and upper bound expressions generated by the
/// worksharing loop support; instead we use 0 and the iteration space size
/// as constants.
static std::pair<llvm::Value *, llvm::Value *>
emitDispatchForLoopBounds(CodeGenFunction &CGF,
                          const OMPExecutableDirective &S, Address LB,
                          Address UB) {
  const auto &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
  llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
  llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
  return {LBVal, UBVal};
}

/// Emits internal temp array declarations for the directive with inscan
/// reductions.
/// The code is the following:
/// \code
/// size num_iters = <num_iters>;
/// <type> buffer[num_iters];
/// \endcode
static void emitScanBasedDirectiveDecls(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
  }
  {
    // Emit buffers for each reduction variable.
    // ReductionCodeGen is required to correctly emit the code for array
    // reductions.
    ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
    unsigned Count = 0;
    auto *ITA = CopyArrayTemps.begin();
    for (const Expr *IRef : Privates) {
      const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
      // Emit variably modified arrays, used for arrays/array sections
      // reductions.
      if (PrivateVD->getType()->isVariablyModifiedType()) {
        RedCG.emitSharedOrigLValue(CGF, Count);
        RedCG.emitAggregateType(CGF, Count);
      }
      CodeGenFunction::OpaqueValueMapping DimMapping(
          CGF,
          cast<OpaqueValueExpr>(
              cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
                  ->getSizeExpr()),
          RValue::get(OMPScanNumIterations));
      // Emit temp buffer.
      CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
      ++ITA;
      ++Count;
    }
  }
}

/// Copies final inscan reduction values to the original variables.
/// The code is the following:
/// \code
/// <orig_var> = buffer[num_iters-1];
/// \endcode
static void emitScanBasedDirectiveFinals(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Shareds.append(C->varlist_begin(), C->varlist_end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    Privates.append(C->privates().begin(), C->privates().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  // Create temp var and copy LHS value to this temp value.
  // LHS = TMP[LastIter];
  llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
      OMPScanNumIterations,
      llvm::ConstantInt::get(CGF.SizeTy, 1, /*isSigned=*/false));
  for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
    const Expr *PrivateExpr = Privates[I];
    const Expr *OrigExpr = Shareds[I];
    const Expr *CopyArrayElem = CopyArrayElems[I];
    CodeGenFunction::OpaqueValueMapping IdxMapping(
        CGF,
        cast<OpaqueValueExpr>(
            cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
        RValue::get(OMPLast));
    LValue DestLVal = CGF.EmitLValue(OrigExpr);
    LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
    CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
                    SrcLVal.getAddress(CGF),
                    cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                    cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                    CopyOps[I]);
  }
}

/// Emits the code for the directive with inscan reductions.
/// The code is the following:
/// \code
/// #pragma omp ...
/// for (i: 0..<num_iters>) {
///   <input phase>;
///   buffer[i] = red;
/// }
/// #pragma omp master // in parallel region
/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
///   for (size i = last_iter; i >= pow(2, k); --i)
///     buffer[i] op= buffer[i-pow(2,k)];
/// #pragma omp barrier // in parallel region
/// #pragma omp ...
/// for (0..<num_iters>) {
///   red = InclusiveScan ? buffer[i] : buffer[i-1];
///   <scan phase>;
/// }
/// \endcode
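///
/// For example, a typical source form handled here is:
/// \code
/// #pragma omp parallel for reduction(inscan, +: Sum)
/// for (int I = 0; I < N; ++I) {
///   Sum += A[I];
///   #pragma omp scan inclusive(Sum)
///   B[I] = Sum;
/// }
/// \endcode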
static void emitScanBasedDirective(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
    llvm::function_ref<void(CodeGenFunction &)> FirstGen,
    llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
  llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
      NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    assert(C->getModifier() == OMPC_REDUCTION_inscan &&
           "Only inscan reductions are expected.");
    Privates.append(C->privates().begin(), C->privates().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
  {
    // Emit loop with input phase:
    // #pragma omp ...
    // for (i: 0..<num_iters>) {
    //   <input phase>;
    //   buffer[i] = red;
    // }
    CGF.OMPFirstScanLoop = true;
    CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
    FirstGen(CGF);
  }
  // #pragma omp barrier // in parallel region
  auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
                    &ReductionOps,
                    &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    // Emit prefix reduction:
    // #pragma omp master // in parallel region
    // for (int k = 0; k < ceil(log2(n)); ++k)
    llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
    llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
    llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
    llvm::Function *F =
        CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
    llvm::Value *Arg =
        CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
    llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
    F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
    LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
    LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
    llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
        OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
    auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
    CGF.EmitBlock(LoopBB);
    auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
    // size pow2k = 1;
    auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
    Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
    Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
    // for (size i = n - 1; i >= 2 ^ k; --i)
    //   tmp[i] op= tmp[i-pow2k];
    llvm::BasicBlock *InnerLoopBB =
        CGF.createBasicBlock("omp.inner.log.scan.body");
    llvm::BasicBlock *InnerExitBB =
        CGF.createBasicBlock("omp.inner.log.scan.exit");
    llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
    CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
    CGF.EmitBlock(InnerLoopBB);
    auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
    IVal->addIncoming(NMin1, LoopBB);
    {
      CodeGenFunction::OMPPrivateScope PrivScope(CGF);
      auto *ILHS = LHSs.begin();
      auto *IRHS = RHSs.begin();
      for (const Expr *CopyArrayElem : CopyArrayElems) {
        const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
        const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
        Address LHSAddr = Address::invalid();
        {
          CodeGenFunction::OpaqueValueMapping IdxMapping(
              CGF,
              cast<OpaqueValueExpr>(
                  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
              RValue::get(IVal));
          LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
        }
        PrivScope.addPrivate(LHSVD, LHSAddr);
        Address RHSAddr = Address::invalid();
        {
          llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
          CodeGenFunction::OpaqueValueMapping IdxMapping(
              CGF,
              cast<OpaqueValueExpr>(
                  cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
              RValue::get(OffsetIVal));
          RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
        }
        PrivScope.addPrivate(RHSVD, RHSAddr);
        ++ILHS;
        ++IRHS;
      }
      PrivScope.Privatize();
      CGF.CGM.getOpenMPRuntime().emitReduction(
          CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
          {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
    }
    llvm::Value *NextIVal =
        CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
    IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
    CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
    CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
    CGF.EmitBlock(InnerExitBB);
    llvm::Value *Next =
        CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
    Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
    // pow2k <<= 1;
    llvm::Value *NextPow2K =
        CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
    Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
    llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
    CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
    auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
    CGF.EmitBlock(ExitBB);
  };
  if (isOpenMPParallelDirective(S.getDirectiveKind())) {
    CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
    CGF.CGM.getOpenMPRuntime().emitBarrierCall(
        CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
        /*ForceSimpleCall=*/true);
  } else {
    RegionCodeGenTy RCG(CodeGen);
    RCG(CGF);
  }

  CGF.OMPFirstScanLoop = false;
  SecondGen(CGF);
}

static bool emitWorksharingDirective(CodeGenFunction &CGF,
                                     const OMPLoopDirective &S,
                                     bool HasCancel) {
  bool HasLastprivates;
  if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
                   [](const OMPReductionClause *C) {
                     return C->getModifier() == OMPC_REDUCTION_inscan;
                   })) {
    const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
      CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
      OMPLoopScope LoopScope(CGF, S);
      return CGF.EmitScalarExpr(S.getNumIterations());
    };
    const auto &&FirstGen = [&S, HasCancel](CodeGenFunction &CGF) {
      CodeGenFunction::OMPCancelStackRAII CancelRegion(
          CGF, S.getDirectiveKind(), HasCancel);
      (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                       emitForLoopBounds,
                                       emitDispatchForLoopBounds);
      // Emit an implicit barrier at the end.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
                                                 OMPD_for);
    };
    const auto &&SecondGen = [&S, HasCancel,
                              &HasLastprivates](CodeGenFunction &CGF) {
      CodeGenFunction::OMPCancelStackRAII CancelRegion(
          CGF, S.getDirectiveKind(), HasCancel);
      HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                   emitForLoopBounds,
                                                   emitDispatchForLoopBounds);
    };
    if (!isOpenMPParallelDirective(S.getDirectiveKind()))
      emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
    emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
    if (!isOpenMPParallelDirective(S.getDirectiveKind()))
      emitScanBasedDirectiveFinals(CGF, S, NumIteratorsGen);
  } else {
    CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
                                                     HasCancel);
    HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
                                                 emitForLoopBounds,
                                                 emitDispatchForLoopBounds);
  }
  return HasLastprivates;
}

static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) {
  if (S.hasCancel())
    return false;
  for (OMPClause *C : S.clauses()) {
    if (isa<OMPNowaitClause>(C))
      continue;

    if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
      if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
        return false;
      if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
        return false;
      switch (SC->getScheduleKind()) {
      case OMPC_SCHEDULE_auto:
      case OMPC_SCHEDULE_dynamic:
      case OMPC_SCHEDULE_runtime:
      case OMPC_SCHEDULE_guided:
      case OMPC_SCHEDULE_static:
        continue;
      case OMPC_SCHEDULE_unknown:
        return false;
      }
    }

    return false;
  }

  return true;
}

static llvm::omp::ScheduleKind
convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
  switch (ScheduleClauseKind) {
  case OMPC_SCHEDULE_unknown:
    return llvm::omp::OMP_SCHEDULE_Default;
  case OMPC_SCHEDULE_auto:
    return llvm::omp::OMP_SCHEDULE_Auto;
  case OMPC_SCHEDULE_dynamic:
    return llvm::omp::OMP_SCHEDULE_Dynamic;
  case OMPC_SCHEDULE_guided:
    return llvm::omp::OMP_SCHEDULE_Guided;
  case OMPC_SCHEDULE_runtime:
    return llvm::omp::OMP_SCHEDULE_Runtime;
  case OMPC_SCHEDULE_static:
    return llvm::omp::OMP_SCHEDULE_Static;
  }
  llvm_unreachable("Unhandled schedule kind");
}

void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  bool HasLastprivates = false;
  bool UseOMPIRBuilder =
      CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
  auto &&CodeGen = [this, &S, &HasLastprivates,
                    UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
    // Use the OpenMPIRBuilder if enabled.
    if (UseOMPIRBuilder) {
      bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();

      llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
      llvm::Value *ChunkSize = nullptr;
      if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) {
        SchedKind =
            convertClauseKindToSchedKind(SchedClause->getScheduleKind());
        if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
          ChunkSize = EmitScalarExpr(ChunkSizeExpr);
      }

      // Emit the associated statement and get its loop representation.
      const Stmt *Inner = S.getRawStmt();
      llvm::CanonicalLoopInfo *CLI =
          EmitOMPCollapsedCanonicalLoopNest(Inner, 1);

      llvm::OpenMPIRBuilder &OMPBuilder =
          CGM.getOpenMPRuntime().getOMPBuilder();
      llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
          AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
      OMPBuilder.applyWorkshareLoop(
          Builder.getCurrentDebugLocation(), CLI, AllocaIP, NeedsBarrier,
          SchedKind, ChunkSize, /*HasSimdModifier=*/false,
          /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
          /*HasOrderedClause=*/false);
      return;
    }

    HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
                                                S.hasCancel());
  }

  if (!UseOMPIRBuilder) {
    // Emit an implicit barrier at the end.
    if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
      CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
                                          PrePostActionTy &) {
    HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
  }

  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
                                const Twine &Name,
                                llvm::Value *Init = nullptr) {
  LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
  if (Init)
    CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
  return LVal;
}

void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
  const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
  const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
  bool HasLastprivates = false;
  auto &&CodeGen = [&S, CapturedStmt, CS,
                    &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
    const ASTContext &C = CGF.getContext();
    QualType KmpInt32Ty =
        C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
    // Emit helper vars inits.
    LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
                                  CGF.Builder.getInt32(0));
    llvm::ConstantInt *GlobalUBVal = CS != nullptr
                                         ? CGF.Builder.getInt32(CS->size() - 1)
                                         : CGF.Builder.getInt32(0);
    LValue UB =
        createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
    LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
                                  CGF.Builder.getInt32(1));
    LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
                                  CGF.Builder.getInt32(0));
    // Loop counter.
    LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
    OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
    OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
    CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
    // Generate condition for loop.
    BinaryOperator *Cond = BinaryOperator::Create(
        C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary,
        S.getBeginLoc(), FPOptionsOverride());
    // Increment for loop counter.
    UnaryOperator *Inc = UnaryOperator::Create(
        C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary,
        S.getBeginLoc(), true, FPOptionsOverride());
    auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
      // Iterate through all sections and emit a switch construct:
      // switch (IV) {
      // case 0:
      //   <SectionStmt[0]>;
      //   break;
      // ...
      // case <NumSection> - 1:
      //   <SectionStmt[<NumSection> - 1]>;
      //   break;
      // }
      // .omp.sections.exit:
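      //
      // For example, with
      //   #pragma omp sections
      //   {
      //     #pragma omp section
      //     Work0();
      //     #pragma omp section
      //     Work1();
      //   }
      // the switch gets cases 0 and 1 that dispatch to Work0 and Work1.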
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
      llvm::SwitchInst *SwitchStmt =
          CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
                                   ExitBB, CS == nullptr ? 1 : CS->size());
      if (CS) {
        unsigned CaseNumber = 0;
        for (const Stmt *SubStmt : CS->children()) {
          auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
          CGF.EmitBlock(CaseBB);
          SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
          CGF.EmitStmt(SubStmt);
          CGF.EmitBranch(ExitBB);
          ++CaseNumber;
        }
      } else {
        llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
        CGF.EmitBlock(CaseBB);
        SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
        CGF.EmitStmt(CapturedStmt);
        CGF.EmitBranch(ExitBB);
      }
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    };

    CodeGenFunction::OMPPrivateScope LoopScope(CGF);
    if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // initialization of firstprivate variables and post-update of
      // lastprivate variables.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, LoopScope);
    CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
    HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
    CGF.EmitOMPReductionClauseInit(S, LoopScope);
    (void)LoopScope.Privatize();
    if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
      CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);

    // Emit static non-chunked loop.
    OpenMPScheduleTy ScheduleKind;
    ScheduleKind.Schedule = OMPC_SCHEDULE_static;
    CGOpenMPRuntime::StaticRTInput StaticInit(
        /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false,
        IL.getAddress(CGF), LB.getAddress(CGF), UB.getAddress(CGF),
        ST.getAddress(CGF));
    CGF.CGM.getOpenMPRuntime().emitForStaticInit(
        CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
    // UB = min(UB, GlobalUB);
    llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
    llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
        CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
    CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
    // IV = LB;
    CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
    // while (idx <= UB) { BODY; ++idx; }
    CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
                         [](CodeGenFunction &) {});
    // Tell the runtime we are done.
    auto &&CodeGen = [&S](CodeGenFunction &CGF) {
      CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
                                                     S.getDirectiveKind());
    };
    CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
    // Emit post-update of the reduction variables if IsLastIter != 0.
    emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
      return CGF.Builder.CreateIsNotNull(
          CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
    });

    // Emit final copy of the lastprivate variables if IsLastIter != 0.
    if (HasLastprivates)
      CGF.EmitOMPLastprivateClauseFinal(
          S, /*NoFinals=*/false,
          CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
  };

  bool HasCancel = false;
  if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
    HasCancel = OSD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
    HasCancel = OPSD->hasCancel();
  OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
4052 HasCancel);
4053 // Emit barrier for lastprivates only if 'sections' directive has 'nowait'
4054 // clause. Otherwise the barrier will be generated by the codegen for the
4055 // directive.
4056 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
4057 // Emit implicit barrier to synchronize threads and avoid data races on
4058 // initialization of firstprivate variables.
4059 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4060 OMPD_unknown);
4061 }
4062 }
4063
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
    using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    const CapturedStmt *ICS = S.getInnermostCapturedStmt();
    const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
    const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
    llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector;
    if (CS) {
      for (const Stmt *SubStmt : CS->children()) {
        auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
                                         InsertPointTy CodeGenIP) {
          OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
              *this, SubStmt, AllocaIP, CodeGenIP, "section");
        };
        SectionCBVector.push_back(SectionCB);
      }
    } else {
      auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
                                            InsertPointTy CodeGenIP) {
        OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
            *this, CapturedStmt, AllocaIP, CodeGenIP, "section");
      };
      SectionCBVector.push_back(SectionCB);
    }

    // Privatization callback that performs appropriate action for
    // shared/private/firstprivate/lastprivate/copyin/... variables.
    //
    // TODO: This defaults to shared right now.
    auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
                     llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
      // The next line is appropriate only for variables (Val) with the
      // data-sharing attribute "shared".
      ReplVal = &Val;

      return CodeGenIP;
    };

    CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP);
    CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
    llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
        AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
    Builder.restoreIP(OMPBuilder.createSections(
        Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(),
        S.getSingleClause<OMPNowaitClause>()));
    return;
  }
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    EmitSections(S);
  }
  // Emit an implicit barrier at the end.
  if (!S.getSingleClause<OMPNowaitClause>()) {
    CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
                                           OMPD_sections);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                   InsertPointTy CodeGenIP) {
      OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
          *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
    };

    LexicalScope Scope(*this, S.getSourceRange());
    EmitStopPoint(&S);
    Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB));

    return;
  }
  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(&S);
  EmitStmt(S.getAssociatedStmt());
}

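// Illustrative 'single' with 'copyprivate' handled below: one thread executes
// the region and its value of 'x' is broadcast to the other threads' copies
// before they leave the construct (sketch, not from this file):
//   int x;
//   #pragma omp single copyprivate(x)
//   x = read_input();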
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
  llvm::SmallVector<const Expr *, 8> CopyprivateVars;
  llvm::SmallVector<const Expr *, 8> DestExprs;
  llvm::SmallVector<const Expr *, 8> SrcExprs;
  llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination>=<source> expressions)
  for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
    CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
    DestExprs.append(C->destination_exprs().begin(),
                     C->destination_exprs().end());
    SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
    AssignmentOps.append(C->assignment_ops().begin(),
                         C->assignment_ops().end());
  }
  // Emit code for 'single' region along with 'copyprivate' clauses
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope SingleScope(CGF);
    (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
    CGF.EmitOMPPrivateClause(S, SingleScope);
    (void)SingleScope.Privatize();
    CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    OMPLexicalScope Scope(*this, S, OMPD_unknown);
    CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
                                            CopyprivateVars, DestExprs,
                                            SrcExprs, AssignmentOps);
  }
  // Emit an implicit barrier at the end (to avoid data race on firstprivate
  // init or if no 'nowait' clause was specified and no 'copyprivate' clause).
  if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
    CGM.getOpenMPRuntime().emitBarrierCall(
        *this, S.getBeginLoc(),
        S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getRawStmt());
  };
  CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP) {
      OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
          *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
    };

    LexicalScope Scope(*this, S.getSourceRange());
    EmitStopPoint(&S);
    Builder.restoreIP(OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB));

    return;
  }
  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(&S);
  emitMaster(*this, S);
}

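// A 'masked' region is executed only by the thread selected by the optional
// 'filter' clause (thread 0 when the clause is absent). Illustrative input:
//   #pragma omp masked filter(2)
//   { work(); }   // runs on the thread with thread id 2 only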
static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getRawStmt());
  };
  Expr *Filter = nullptr;
  if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
    Filter = FilterClause->getThreadID();
  CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(),
                                              Filter);
}

void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt();
    const Expr *Filter = nullptr;
    if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
      Filter = FilterClause->getThreadID();
    llvm::Value *FilterVal = Filter
                                 ? EmitScalarExpr(Filter, CGM.Int32Ty)
                                 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                  InsertPointTy CodeGenIP) {
      OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
          *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
    };

    LexicalScope Scope(*this, S.getSourceRange());
    EmitStopPoint(&S);
    Builder.restoreIP(
        OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal));

    return;
  }
  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(&S);
  emitMasked(*this, S);
}

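// Illustrative 'critical' with a name and a contention hint (OpenMP 5.0):
//   #pragma omp critical (updates) hint(omp_sync_hint_contended)
//   { shared_counter++; }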
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
    const Expr *Hint = nullptr;
    if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
      Hint = HintClause->getHint();

    // TODO: This is slightly different from what's currently being done in
    // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
    // about typing is final.
    llvm::Value *HintInst = nullptr;
    if (Hint)
      HintInst =
          Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);

    auto FiniCB = [this](InsertPointTy IP) {
      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
    };

    auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
                                                    InsertPointTy CodeGenIP) {
      OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
          *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
    };

    LexicalScope Scope(*this, S.getSourceRange());
    EmitStopPoint(&S);
    Builder.restoreIP(OMPBuilder.createCritical(
        Builder, BodyGenCB, FiniCB, S.getDirectiveName().getAsString(),
        HintInst));

    return;
  }

  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitStmt(S.getAssociatedStmt());
  };
  const Expr *Hint = nullptr;
  if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
    Hint = HintClause->getHint();
  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(&S);
  CGM.getOpenMPRuntime().emitCriticalRegion(*this,
                                            S.getDirectiveName().getAsString(),
                                            CodeGen, S.getBeginLoc(), Hint);
}

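// The combined 'parallel for' below is emitted as a 'parallel' region whose
// outlined body runs the worksharing loop. When an inscan reduction such as
//   #pragma omp parallel for reduction(inscan, +: sum)
// is present, the scan buffers must be set up before the parallel region is
// entered (emitScanBasedDirectiveDecls) and torn down after it
// (emitScanBasedDirectiveFinals), which is why the inscan check brackets
// emitCommonOMPParallelDirective.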
void CodeGenFunction::EmitOMPParallelForDirective(
    const OMPParallelForDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    (void)emitWorksharingDirective(CGF, S, S.hasCancel());
  };
  {
    const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
      CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
      CGCapturedStmtInfo CGSI(CR_OpenMP);
      CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
      OMPLoopScope LoopScope(CGF, S);
      return CGF.EmitScalarExpr(S.getNumIterations());
    };
    bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
                                 [](const OMPReductionClause *C) {
                                   return C->getModifier() ==
                                          OMPC_REDUCTION_inscan;
                                 });
    if (IsInscan)
      emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
                                   emitEmptyBoundParameters);
    if (IsInscan)
      emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'for simd' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
  };
  {
    const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
      CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
      CGCapturedStmtInfo CGSI(CR_OpenMP);
      CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
      OMPLoopScope LoopScope(CGF, S);
      return CGF.EmitScalarExpr(S.getNumIterations());
    };
    bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
                                 [](const OMPReductionClause *C) {
                                   return C->getModifier() ==
                                          OMPC_REDUCTION_inscan;
                                 });
    if (IsInscan)
      emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
                                   emitEmptyBoundParameters);
    if (IsInscan)
      emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelMasterDirective(
    const OMPParallelMasterDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'master' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    OMPPrivateScope PrivateScope(CGF);
    bool Copyins = CGF.EmitOMPCopyinClause(S);
    (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
    if (Copyins) {
      // Emit implicit barrier to synchronize threads and avoid data races on
      // propagation of the master thread's values of threadprivate variables
      // to the local instances of those variables in all other implicit
      // threads.
      CGF.CGM.getOpenMPRuntime().emitBarrierCall(
          CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
          /*ForceSimpleCall=*/true);
    }
    CGF.EmitOMPPrivateClause(S, PrivateScope);
    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
    (void)PrivateScope.Privatize();
    emitMaster(CGF, S);
    CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
                                   emitEmptyBoundParameters);
    emitPostUpdateForReductionClause(*this, S,
                                     [](CodeGenFunction &) { return nullptr; });
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

void CodeGenFunction::EmitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective &S) {
  // Emit directive as a combined directive that consists of two implicit
  // directives: 'parallel' with 'sections' directive.
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    CGF.EmitSections(S);
  };
  {
    auto LPCRegion =
        CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
    emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
                                   emitEmptyBoundParameters);
  }
  // Check for outer lastprivate conditional update.
  checkForLastprivateConditionalUpdate(*this, S);
}

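// An untied task may resume on a different thread after a task scheduling
// point, so variables declared directly in the task body must live in the
// task's private storage rather than on the thread stack. Illustrative case
// (sketch, not from this file):
//   #pragma omp task untied
//   {
//     int tmp = f();
//     g();          // may contain a scheduling point
//     use(tmp);     // 'tmp' must survive a possible thread switch
//   }
// The visitor below collects such locals so they can be privatized.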
namespace {
/// Get the list of variables declared in the context of the untied tasks.
class CheckVarsEscapingUntiedTaskDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
  llvm::SmallVector<const VarDecl *, 4> PrivateDecls;

public:
  explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
  virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    // Only local variables need to be privatized; static locals can be
    // processed as is.
    for (const Decl *D : S->decls()) {
      if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
        if (VD->hasLocalStorage())
          PrivateDecls.push_back(VD);
    }
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
  void VisitCapturedStmt(const CapturedStmt *) {}
  void VisitLambdaExpr(const LambdaExpr *) {}
  void VisitBlockExpr(const BlockExpr *) {}
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the list of collected private variables.
  ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
};
} // anonymous namespace

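// Illustrative dependences handled by buildDependences (omp_all_memory is
// OpenMP 5.1):
//   #pragma omp task depend(inout: omp_all_memory) depend(in: x) \
//       depend(out: y)
// 'omp_all_memory' subsumes every 'out'/'inout' item, so only the all-memory
// entry and the 'in: x' dependence are kept.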
static void buildDependences(const OMPExecutableDirective &S,
                             OMPTaskDataTy &Data) {

  // First look for 'omp_all_memory' and add this first.
  bool OmpAllMemory = false;
  if (llvm::any_of(
          S.getClausesOfKind<OMPDependClause>(), [](const OMPDependClause *C) {
            return C->getDependencyKind() == OMPC_DEPEND_outallmemory ||
                   C->getDependencyKind() == OMPC_DEPEND_inoutallmemory;
          })) {
    OmpAllMemory = true;
    // Since both OMPC_DEPEND_outallmemory and OMPC_DEPEND_inoutallmemory are
    // equivalent to the runtime, always use OMPC_DEPEND_outallmemory to
    // simplify.
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory,
                                      /*IteratorExpr=*/nullptr);
    // Add a nullptr Expr to simplify the codegen in emitDependData.
    DD.DepExprs.push_back(nullptr);
  }
  // Add remaining dependences skipping any 'out' or 'inout' if they are
  // overridden by 'omp_all_memory'.
  for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
    OpenMPDependClauseKind Kind = C->getDependencyKind();
    if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory)
      continue;
    if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout))
      continue;
    OMPTaskDataTy::DependData &DD =
        Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
    DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
  }
}

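// Illustrative task that exercises most of the clause handling below (sketch,
// not from this file; 'in_reduction' additionally requires a matching
// 'task_reduction' on an enclosing taskgroup):
//   #pragma omp task final(n > 16) priority(2) firstprivate(a) private(b) \
//       in_reduction(+: r) depend(in: x)
//   { work(a, b); r += a; }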
void CodeGenFunction::EmitOMPTaskBasedDirective(
    const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
    const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
    OMPTaskDataTy &Data) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  // Check if the task is final
  if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
    // If the condition constant folds and can be elided, try to avoid emitting
    // the condition and the dead arm of the if/else.
    const Expr *Cond = Clause->getCondition();
    bool CondConstant;
    if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
      Data.Final.setInt(CondConstant);
    else
      Data.Final.setPointer(EvaluateExprAsBool(Cond));
  } else {
    // By default the task is not final.
    Data.Final.setInt(/*IntVal=*/false);
  }
  // Check if the task has 'priority' clause.
  if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
    const Expr *Prio = Clause->getPriority();
    Data.Priority.setInt(/*IntVal=*/true);
    Data.Priority.setPointer(EmitScalarConversion(
        EmitScalarExpr(Prio), Prio->getType(),
        getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
        Prio->getExprLoc()));
  }
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >= 0 for untied tasks).
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  // Get list of private variables.
  for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.PrivateVars.push_back(*IRef);
        Data.PrivateCopies.push_back(IInit);
      }
      ++IRef;
    }
  }
  EmittedAsPrivate.clear();
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.FirstprivateVars.push_back(*IRef);
        Data.FirstprivateCopies.push_back(IInit);
        Data.FirstprivateInits.push_back(*IElemInitRef);
      }
      ++IRef;
      ++IElemInitRef;
    }
  }
  // Get list of lastprivate variables (for taskloops).
  llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
  for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ID = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        Data.LastprivateVars.push_back(*IRef);
        Data.LastprivateCopies.push_back(IInit);
      }
      LastprivateDstsOrigs.insert(
          std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
                         cast<DeclRefExpr>(*IRef)));
      ++IRef;
      ++ID;
    }
  }
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
    Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
    Data.ReductionOps.append(C->reduction_ops().begin(),
                             C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
  }
  Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
      *this, S.getBeginLoc(), LHSs, RHSs, Data);
  // Build list of dependences.
  buildDependences(S, Data);
  // Get list of local vars for untied tasks.
  if (!Data.Tied) {
    CheckVarsEscapingUntiedTaskDeclContext Checker;
    Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
    Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
                              Checker.getPrivateDecls().end());
  }
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
                    CapturedRegion](CodeGenFunction &CGF,
                                    PrePostActionTy &Action) {
    llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                    std::pair<Address, Address>>
        UntiedLocalVars;
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    // Generate debug info for variables present in shared clause.
    if (auto *DI = CGF.getDebugInfo()) {
      llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
          CGF.CapturedStmtInfo->getCaptureFields();
      llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
      if (CaptureFields.size() && ContextValue) {
        unsigned CharWidth = CGF.getContext().getCharWidth();
        // The shared variables are packed together as members of a structure.
        // So the address of each shared variable can be computed by adding
        // offset of it (within record) to the base address of record. For each
        // shared variable, debug intrinsic llvm.dbg.declare is generated with
        // appropriate expressions (DIExpression).
        // Ex:
        //   %12 = load %struct.anon*, %struct.anon** %__context.addr.i
        //   call void @llvm.dbg.declare(metadata %struct.anon* %12,
        //             metadata !svar1,
        //             metadata !DIExpression(DW_OP_deref))
        //   call void @llvm.dbg.declare(metadata %struct.anon* %12,
        //             metadata !svar2,
        //             metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
        for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
          const VarDecl *SharedVar = It->first;
          RecordDecl *CaptureRecord = It->second->getParent();
          const ASTRecordLayout &Layout =
              CGF.getContext().getASTRecordLayout(CaptureRecord);
          unsigned Offset =
              Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
          if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
            (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
                                                CGF.Builder, false);
          llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
          // Get the call dbg.declare instruction we just created and update
          // its DIExpression to add offset to base address.
          if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
            SmallVector<uint64_t, 8> Ops;
            // Add offset to the base address if non zero.
            if (Offset) {
              Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
              Ops.push_back(Offset);
            }
            Ops.push_back(llvm::dwarf::DW_OP_deref);
            auto &Ctx = DDI->getContext();
            llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
            Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
          }
        }
      }
    }
    llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
    if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
        !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      llvm::SmallVector<llvm::Type *, 4> ParamTypes;
      CallArgs.push_back(PrivatesPtr);
      ParamTypes.push_back(PrivatesPtr->getType());
      for (const Expr *E : Data.PrivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
        ParamTypes.push_back(PrivatePtr.getType());
      }
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        FirstprivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
        ParamTypes.push_back(PrivatePtr.getType());
      }
      for (const Expr *E : Data.LastprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".lastpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
        ParamTypes.push_back(PrivatePtr.getType());
      }
      for (const VarDecl *VD : Data.PrivateLocals) {
        QualType Ty = VD->getType().getNonReferenceType();
        if (VD->getType()->isLValueReferenceType())
          Ty = CGF.getContext().getPointerType(Ty);
        if (isAllocatableDecl(VD))
          Ty = CGF.getContext().getPointerType(Ty);
        Address PrivatePtr = CGF.CreateMemTemp(
            CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
        auto Result = UntiedLocalVars.insert(
            std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
        // If the key already exists, update it in place.
        if (!Result.second)
          *Result.first = std::make_pair(
              VD, std::make_pair(PrivatePtr, Address::invalid()));
        CallArgs.push_back(PrivatePtr.getPointer());
        ParamTypes.push_back(PrivatePtr.getType());
      }
      auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
                                               ParamTypes, /*isVarArg=*/false);
      CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CopyFn, CopyFnTy->getPointerTo());
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : LastprivateDstsOrigs) {
        const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
        DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        Pair.second->getType(), VK_LValue,
                        Pair.second->getExprLoc());
        Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
      }
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement = Address(
            CGF.Builder.CreateLoad(Pair.second),
            CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, Replacement);
        if (auto *DI = CGF.getDebugInfo())
          if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
            (void)DI->EmitDeclareOfAutoVariable(
                Pair.first, Pair.second.getPointer(), CGF.Builder,
                /*UsePointerValue*/ true);
      }
      // Adjust mapping for internal locals by mapping actual memory instead of
      // a pointer to this memory.
      for (auto &Pair : UntiedLocalVars) {
        QualType VDType = Pair.first->getType().getNonReferenceType();
        if (isAllocatableDecl(Pair.first)) {
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(
              Ptr,
              CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
              CGF.getPointerAlign());
          Pair.second.first = Replacement;
          Ptr = CGF.Builder.CreateLoad(Replacement);
          Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
                                CGF.getContext().getDeclAlign(Pair.first));
          Pair.second.second = Replacement;
        } else {
          llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
          Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
                              CGF.getContext().getDeclAlign(Pair.first));
          Pair.second.first = Replacement;
        }
      }
    }
    if (Data.Reductions) {
      OMPPrivateScope FirstprivateScope(CGF);
      for (const auto &Pair : FirstprivatePtrs) {
        Address Replacement(
            CGF.Builder.CreateLoad(Pair.second),
            CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
            CGF.getContext().getDeclAlign(Pair.first));
        FirstprivateScope.addPrivate(Pair.first, Replacement);
      }
      (void)FirstprivateScope.Privatize();
      OMPLexicalScope LexScope(CGF, S, CapturedRegion);
      ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
                             Data.ReductionCopies, Data.ReductionOps);
      llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
      for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement =
            Address(CGF.EmitScalarConversion(
                        Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                        CGF.getContext().getPointerType(
                            Data.ReductionCopies[Cnt]->getType()),
                        Data.ReductionCopies[Cnt]->getExprLoc()),
                    CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
                    Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
      }
    }
    // Privatize all private variables except for in_reduction items.
    (void)Scope.Privatize();
    SmallVector<const Expr *, 4> InRedVars;
    SmallVector<const Expr *, 4> InRedPrivs;
    SmallVector<const Expr *, 4> InRedOps;
    SmallVector<const Expr *, 4> TaskgroupDescriptors;
    for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
      auto IPriv = C->privates().begin();
      auto IRed = C->reduction_ops().begin();
      auto ITD = C->taskgroup_descriptors().begin();
      for (const Expr *Ref : C->varlists()) {
        InRedVars.emplace_back(Ref);
        InRedPrivs.emplace_back(*IPriv);
        InRedOps.emplace_back(*IRed);
        TaskgroupDescriptors.emplace_back(*ITD);
        std::advance(IPriv, 1);
        std::advance(IRed, 1);
        std::advance(ITD, 1);
      }
    }
    // Privatize in_reduction items here, because taskgroup descriptors must be
    // privatized earlier.
    OMPPrivateScope InRedScope(CGF);
    if (!InRedVars.empty()) {
      ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
      for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
        RedCG.emitSharedOrigLValue(CGF, Cnt);
        RedCG.emitAggregateType(CGF, Cnt);
        // The taskgroup descriptor variable is always implicit firstprivate
        // and privatized already during processing of the firstprivates.
        // FIXME: This must be removed once the runtime library is fixed.
        // Emit required threadprivate variables for
        // initializer/combiner/finalizer.
        CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                           RedCG, Cnt);
        llvm::Value *ReductionsPtr;
        if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
          ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
                                               TRExpr->getExprLoc());
        } else {
          ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
        }
        Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
            CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
        Replacement = Address(
            CGF.EmitScalarConversion(
                Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
                InRedPrivs[Cnt]->getExprLoc()),
            CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
            Replacement.getAlignment());
        Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
        InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
      }
    }
    (void)InRedScope.Privatize();

    CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
                                                             UntiedLocalVars);
    Action.Enter(CGF);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
      Data.NumberOfParts);
  OMPLexicalScope Scope(*this, S, llvm::None,
                        !isOpenMPParallelDirective(S.getDirectiveKind()) &&
                            !isOpenMPSimdDirective(S.getDirectiveKind()));
  TaskGen(*this, OutlinedFn, Data);
}

static ImplicitParamDecl *
createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
                                  QualType Ty, CapturedDecl *CD,
                                  SourceLocation Loc) {
  auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                           ImplicitParamDecl::Other);
  auto *OrigRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
                                              ImplicitParamDecl::Other);
  auto *PrivateRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
  QualType ElemType = C.getBaseElementType(Ty);
  auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
                                           ImplicitParamDecl::Other);
  auto *InitRef = DeclRefExpr::Create(
      C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
      /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
  PrivateVD->setInitStyle(VarDecl::CInit);
  PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
                                              InitRef, /*BasePath=*/nullptr,
                                              VK_PRValue, FPOptionsOverride()));
  Data.FirstprivateVars.emplace_back(OrigRef);
  Data.FirstprivateCopies.emplace_back(PrivateRef);
  Data.FirstprivateInits.emplace_back(InitRef);
  return OrigVD;
}

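// For 'target'-based tasks, the runtime arrays that describe the mapped data
// (base pointers, pointers, sizes and, when user-defined mappers are present,
// mappers) are threaded through the task as implicit firstprivates created by
// the helper above, so the task body can rebuild the addresses after a
// possible deferral.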
void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
    const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
    OMPTargetDataInfo &InputInfo) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  auto I = CS->getCapturedDecl()->param_begin();
  auto PartId = std::next(I);
  auto TaskT = std::next(I, 4);
  OMPTaskDataTy Data;
  // The task is not final.
  Data.Final.setInt(/*IntVal=*/false);
  // Get list of firstprivate variables.
  for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto IElemInitRef = C->inits().begin();
    for (auto *IInit : C->private_copies()) {
      Data.FirstprivateVars.push_back(*IRef);
      Data.FirstprivateCopies.push_back(IInit);
      Data.FirstprivateInits.push_back(*IElemInitRef);
      ++IRef;
      ++IElemInitRef;
    }
  }
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
    Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
    Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
    Data.ReductionOps.append(C->reduction_ops().begin(),
                             C->reduction_ops().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
  }
  OMPPrivateScope TargetScope(*this);
  VarDecl *BPVD = nullptr;
  VarDecl *PVD = nullptr;
  VarDecl *SVD = nullptr;
  VarDecl *MVD = nullptr;
  if (InputInfo.NumberOfTargetItems > 0) {
    auto *CD = CapturedDecl::Create(
        getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
    llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
    QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
        getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    BPVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    PVD = createImplicitFirstprivateForType(
        getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
    QualType SizesType = getContext().getConstantArrayType(
        getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
        ArrSize, nullptr, ArrayType::Normal,
        /*IndexTypeQuals=*/0);
    SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
                                            S.getBeginLoc());
    TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
    TargetScope.addPrivate(PVD, InputInfo.PointersArray);
    TargetScope.addPrivate(SVD, InputInfo.SizesArray);
    // If there is no user-defined mapper, the mapper array will be nullptr. In
    // this case, we don't need to privatize it.
    if (!isa_and_nonnull<llvm::ConstantPointerNull>(
            InputInfo.MappersArray.getPointer())) {
      MVD = createImplicitFirstprivateForType(
          getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
      TargetScope.addPrivate(MVD, InputInfo.MappersArray);
    }
  }
  (void)TargetScope.Privatize();
  buildDependences(S, Data);
  auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
                    &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
    // Set proper addresses for generated private copies.
    OMPPrivateScope Scope(CGF);
    if (!Data.FirstprivateVars.empty()) {
      enum { PrivatesParam = 2, CopyFnParam = 3 };
      llvm::Value *CopyFn = CGF.Builder.CreateLoad(
          CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
      llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
          CS->getCapturedDecl()->getParam(PrivatesParam)));
      // Map privates.
      llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
      llvm::SmallVector<llvm::Value *, 16> CallArgs;
      llvm::SmallVector<llvm::Type *, 4> ParamTypes;
      CallArgs.push_back(PrivatesPtr);
      ParamTypes.push_back(PrivatesPtr->getType());
      for (const Expr *E : Data.FirstprivateVars) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        Address PrivatePtr =
            CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
                              ".firstpriv.ptr.addr");
        PrivatePtrs.emplace_back(VD, PrivatePtr);
        CallArgs.push_back(PrivatePtr.getPointer());
        ParamTypes.push_back(PrivatePtr.getType());
      }
      auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
                                               ParamTypes, /*isVarArg=*/false);
      CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          CopyFn, CopyFnTy->getPointerTo());
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
          CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
      for (const auto &Pair : PrivatePtrs) {
        Address Replacement(
            CGF.Builder.CreateLoad(Pair.second),
            CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
            CGF.getContext().getDeclAlign(Pair.first));
        Scope.addPrivate(Pair.first, Replacement);
      }
    }
    CGF.processInReduction(S, Data, CGF, CS, Scope);
    if (InputInfo.NumberOfTargetItems > 0) {
      InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
      InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
      InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
          CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
      // If MVD is nullptr, the mapper array was not privatized.
      if (MVD)
        InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
            CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
    }

    Action.Enter(CGF);
    OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
    BodyGen(CGF);
  };
  llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
      S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, /*Tied=*/true,
      Data.NumberOfParts);
  llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
  IntegerLiteral IfCond(getContext(), TrueOrFalse,
                        getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
                        SourceLocation());
  CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
                                      SharedsTy, CapturedStruct, &IfCond, Data);
}

void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
                                         OMPTaskDataTy &Data,
                                         CodeGenFunction &CGF,
                                         const CapturedStmt *CS,
                                         OMPPrivateScope &Scope) {
  if (Data.Reductions) {
    OpenMPDirectiveKind CapturedRegion = S.getDirectiveKind();
    OMPLexicalScope LexScope(CGF, S, CapturedRegion);
    ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
                           Data.ReductionCopies, Data.ReductionOps);
    llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(4)));
    for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
      RedCG.emitSharedOrigLValue(CGF, Cnt);
      RedCG.emitAggregateType(CGF, Cnt);
      // FIXME: This must be removed once the runtime library is fixed.
      // Emit required threadprivate variables for
      // initializer/combiner/finalizer.
      CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                         RedCG, Cnt);
      Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
          CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
      Replacement =
          Address(CGF.EmitScalarConversion(
                      Replacement.getPointer(), CGF.getContext().VoidPtrTy,
                      CGF.getContext().getPointerType(
                          Data.ReductionCopies[Cnt]->getType()),
                      Data.ReductionCopies[Cnt]->getExprLoc()),
                  CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
                  Replacement.getAlignment());
      Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
      Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
    }
  }
  (void)Scope.Privatize();
  SmallVector<const Expr *, 4> InRedVars;
  SmallVector<const Expr *, 4> InRedPrivs;
  SmallVector<const Expr *, 4> InRedOps;
  SmallVector<const Expr *, 4> TaskgroupDescriptors;
  for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
    auto IPriv = C->privates().begin();
    auto IRed = C->reduction_ops().begin();
    auto ITD = C->taskgroup_descriptors().begin();
    for (const Expr *Ref : C->varlists()) {
      InRedVars.emplace_back(Ref);
      InRedPrivs.emplace_back(*IPriv);
      InRedOps.emplace_back(*IRed);
      TaskgroupDescriptors.emplace_back(*ITD);
      std::advance(IPriv, 1);
      std::advance(IRed, 1);
      std::advance(ITD, 1);
    }
  }
  OMPPrivateScope InRedScope(CGF);
  if (!InRedVars.empty()) {
    ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
    for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
      RedCG.emitSharedOrigLValue(CGF, Cnt);
      RedCG.emitAggregateType(CGF, Cnt);
      // FIXME: This must be removed once the runtime library is fixed.
      // Emit required threadprivate variables for
      // initializer/combiner/finalizer.
      CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
                                                         RedCG, Cnt);
      llvm::Value *ReductionsPtr;
      if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
        ReductionsPtr =
            CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), TRExpr->getExprLoc());
      } else {
        ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
      }
      Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
          CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
      Replacement = Address(
          CGF.EmitScalarConversion(
              Replacement.getPointer(), CGF.getContext().VoidPtrTy,
              CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
              InRedPrivs[Cnt]->getExprLoc()),
          CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
          Replacement.getAlignment());
      Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
      InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
    }
  }
  (void)InRedScope.Privatize();
}

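// Illustrative input handled below (sketch, not from this file):
//   #pragma omp task if(task: n > 100) untied depend(out: buf)
//   produce(buf);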
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
  // Emit outlined function for task construct.
  const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
  Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
  QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
  const Expr *IfCond = nullptr;
  for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
    if (C->getNameModifier() == OMPD_unknown ||
        C->getNameModifier() == OMPD_task) {
      IfCond = C->getCondition();
      break;
    }
  }

  OMPTaskDataTy Data;
  // Check if we should emit tied or untied task.
  Data.Tied = !S.getSingleClause<OMPUntiedClause>();
  auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitStmt(CS->getCapturedStmt());
  };
  auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
                    IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
                            const OMPTaskDataTy &Data) {
    CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
                                            SharedsTy, CapturedStruct, IfCond,
                                            Data);
  };
  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
  EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
}

void CodeGenFunction::EmitOMPTaskyieldDirective(
    const OMPTaskyieldDirective &S) {
  CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}

void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
  CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}

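// Since OpenMP 5.0 'taskwait' may carry 'depend' clauses, e.g.
//   #pragma omp taskwait depend(in: x)
// which waits only for previously generated sibling tasks with a matching
// dependence rather than for all child tasks; the dependences are forwarded
// to the runtime via OMPTaskDataTy.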
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
  OMPTaskDataTy Data;
  // Build list of dependences
  buildDependences(S, Data);
  CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
}

bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T) {
  return T.clauses().empty();
}

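// Illustrative taskgroup reduction handled on the non-IRBuilder path below:
//   #pragma omp taskgroup task_reduction(+: r)
//   {
//     #pragma omp task in_reduction(+: r)
//     r += piece();
//   }
// The reduction descriptor produced by emitTaskReductionInit is stored into
// the implicit variable returned by S.getReductionRef().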
EmitOMPTaskgroupDirective(const OMPTaskgroupDirective & S)5212 void CodeGenFunction::EmitOMPTaskgroupDirective(
5213 const OMPTaskgroupDirective &S) {
5214 OMPLexicalScope Scope(*this, S, OMPD_unknown);
5215 if (CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S)) {
5216 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
5217 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
5218 InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
5219 AllocaInsertPt->getIterator());
5220
5221 auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
5222 InsertPointTy CodeGenIP) {
5223 Builder.restoreIP(CodeGenIP);
5224 EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5225 };
5226 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
5227 if (!CapturedStmtInfo)
5228 CapturedStmtInfo = &CapStmtInfo;
5229 Builder.restoreIP(OMPBuilder.createTaskgroup(Builder, AllocaIP, BodyGenCB));
5230 return;
5231 }
5232 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5233 Action.Enter(CGF);
5234 if (const Expr *E = S.getReductionRef()) {
5235 SmallVector<const Expr *, 4> LHSs;
5236 SmallVector<const Expr *, 4> RHSs;
5237 OMPTaskDataTy Data;
5238 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
5239 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
5240 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
5241 Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
5242 Data.ReductionOps.append(C->reduction_ops().begin(),
5243 C->reduction_ops().end());
5244 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5245 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5246 }
5247 llvm::Value *ReductionDesc =
5248 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
5249 LHSs, RHSs, Data);
5250 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5251 CGF.EmitVarDecl(*VD);
5252 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
5253 /*Volatile=*/false, E->getType());
5254 }
5255 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5256 };
5257 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
5258 }
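
// For illustration: given
//   #pragma omp taskgroup task_reduction(+: sum)
//   { /* child tasks marked in_reduction(+: sum) */ }
// the descriptor returned by emitTaskReductionInit (a runtime call such as
// __kmpc_taskred_init, version-dependent) is stored into the variable
// referenced by S.getReductionRef() so that child tasks can locate their
// reduction-private copies.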

void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
  llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
                                ? llvm::AtomicOrdering::NotAtomic
                                : llvm::AtomicOrdering::AcquireRelease;
  CGM.getOpenMPRuntime().emitFlush(
      *this,
      [&S]() -> ArrayRef<const Expr *> {
        if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
          return llvm::makeArrayRef(FlushClause->varlist_begin(),
                                    FlushClause->varlist_end());
        return llvm::None;
      }(),
      S.getBeginLoc(), AO);
}
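
// For illustration: a bare
//   #pragma omp flush
// uses AcquireRelease ordering here, while a flush with a list,
//   #pragma omp flush(x, y)
// takes the NotAtomic path; the variable list is passed through to emitFlush,
// which currently ignores it and emits a full flush (e.g. __kmpc_flush).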

void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
  const auto *DO = S.getSingleClause<OMPDepobjClause>();
  LValue DOLVal = EmitLValue(DO->getDepobj());
  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
    OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
                                           DC->getModifier());
    Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
    Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
        *this, Dependencies, DC->getBeginLoc());
    EmitStoreOfScalar(DepAddr.getPointer(), DOLVal);
    return;
  }
  if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
    CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
    return;
  }
  if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
    CGM.getOpenMPRuntime().emitUpdateClause(
        *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
    return;
  }
}
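
// For illustration: the three mutually exclusive clause forms map to the
// branches above, e.g.
//   omp_depend_t d;
//   #pragma omp depobj(d) depend(in: x) // init: store dependence array into d
//   #pragma omp depobj(d) update(inout) // change the stored dependency kind
//   #pragma omp depobj(d) destroy       // release the underlying storage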

void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
  if (!OMPParentLoopDirectiveForScan)
    return;
  const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
  bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
  SmallVector<const Expr *, 4> Shareds;
  SmallVector<const Expr *, 4> Privates;
  SmallVector<const Expr *, 4> LHSs;
  SmallVector<const Expr *, 4> RHSs;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<const Expr *, 4> CopyOps;
  SmallVector<const Expr *, 4> CopyArrayTemps;
  SmallVector<const Expr *, 4> CopyArrayElems;
  for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
    if (C->getModifier() != OMPC_REDUCTION_inscan)
      continue;
    Shareds.append(C->varlist_begin(), C->varlist_end());
    Privates.append(C->privates().begin(), C->privates().end());
    LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
    RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
    ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
    CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
    CopyArrayTemps.append(C->copy_array_temps().begin(),
                          C->copy_array_temps().end());
    CopyArrayElems.append(C->copy_array_elems().begin(),
                          C->copy_array_elems().end());
  }
  if (ParentDir.getDirectiveKind() == OMPD_simd ||
      (getLangOpts().OpenMPSimd &&
       isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
    // For simd directive and simd-based directives in simd only mode, use the
    // following codegen:
    // int x = 0;
    // #pragma omp simd reduction(inscan, +: x)
    // for (..) {
    //   <first part>
    //   #pragma omp scan inclusive(x)
    //   <second part>
    // }
    // is transformed to:
    // int x = 0;
    // for (..) {
    //   int x_priv = 0;
    //   <first part>
    //   x = x_priv + x;
    //   x_priv = x;
    //   <second part>
    // }
    // and
    // int x = 0;
    // #pragma omp simd reduction(inscan, +: x)
    // for (..) {
    //   <first part>
    //   #pragma omp scan exclusive(x)
    //   <second part>
    // }
    // to
    // int x = 0;
    // for (..) {
    //   int x_priv = 0;
    //   <second part>
    //   int temp = x;
    //   x = x_priv + x;
    //   x_priv = temp;
    //   <first part>
    // }
    llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
    EmitBranch(IsInclusive
                   ? OMPScanReduce
                   : BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanDispatch);
    {
      // New scope for correct construction/destruction of temp variables for
      // exclusive scan.
      LexicalScope Scope(*this, S.getSourceRange());
      EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
      EmitBlock(OMPScanReduce);
      if (!IsInclusive) {
        // Create temp var and copy LHS value to this temp value.
        // TMP = LHS;
        for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
          const Expr *PrivateExpr = Privates[I];
          const Expr *TempExpr = CopyArrayTemps[I];
          EmitAutoVarDecl(
              *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
          LValue DestLVal = EmitLValue(TempExpr);
          LValue SrcLVal = EmitLValue(LHSs[I]);
          EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                      SrcLVal.getAddress(*this),
                      cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                      cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                      CopyOps[I]);
        }
      }
      CGM.getOpenMPRuntime().emitReduction(
          *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
          {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
      for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
        const Expr *PrivateExpr = Privates[I];
        LValue DestLVal;
        LValue SrcLVal;
        if (IsInclusive) {
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(LHSs[I]);
        } else {
          const Expr *TempExpr = CopyArrayTemps[I];
          DestLVal = EmitLValue(RHSs[I]);
          SrcLVal = EmitLValue(TempExpr);
        }
        EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                    SrcLVal.getAddress(*this),
                    cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                    cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                    CopyOps[I]);
      }
    }
    EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
    OMPScanExitBlock = IsInclusive
                           ? BreakContinueStack.back().ContinueBlock.getBlock()
                           : OMPScanReduce;
    EmitBlock(OMPAfterScanBlock);
    return;
  }
  if (!IsInclusive) {
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
    EmitBlock(OMPScanExitBlock);
  }
  if (OMPFirstScanLoop) {
    // Emit buffer[i] = red; at the end of the input phase.
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue DestLVal = EmitLValue(CopyArrayElem);
      LValue SrcLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
  }
  EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  if (IsInclusive) {
    EmitBlock(OMPScanExitBlock);
    EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
  }
  EmitBlock(OMPScanDispatch);
  if (!OMPFirstScanLoop) {
    // Emit red = buffer[i]; at the entrance to the scan phase.
    const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
                             .getIterationVariable()
                             ->IgnoreParenImpCasts();
    LValue IdxLVal = EmitLValue(IVExpr);
    llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
    IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
    llvm::BasicBlock *ExclusiveExitBB = nullptr;
    if (!IsInclusive) {
      llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
      ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
      llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
      Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
      EmitBlock(ContBB);
      // Use idx - 1 iteration for exclusive scan.
      IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
    }
    for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
      const Expr *PrivateExpr = Privates[I];
      const Expr *OrigExpr = Shareds[I];
      const Expr *CopyArrayElem = CopyArrayElems[I];
      OpaqueValueMapping IdxMapping(
          *this,
          cast<OpaqueValueExpr>(
              cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
          RValue::get(IdxVal));
      LValue SrcLVal = EmitLValue(CopyArrayElem);
      LValue DestLVal = EmitLValue(OrigExpr);
      EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
                  SrcLVal.getAddress(*this),
                  cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
                  cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
                  CopyOps[I]);
    }
    if (!IsInclusive) {
      EmitBlock(ExclusiveExitBB);
    }
  }
  EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
                                               : OMPAfterScanBlock);
  EmitBlock(OMPAfterScanBlock);
}
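
// For illustration: on the worksharing-loop path above, the loop body is
// conceptually run twice. The input phase of iteration i ends by saving its
// partial reduction (buffer[i] = red;), and after the cross-iteration
// reduction the scan phase starts by reloading it (red = buffer[i];, or
// buffer[i-1] for 'exclusive', with iteration 0 skipping the copy).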

void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
                                            const CodeGenLoopTy &CodeGenLoop,
                                            Expr *IncExpr) {
  // Emit the loop iteration variable.
  const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
  const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
  EmitVarDecl(*IVDecl);

  // Emit the iterations count variable.
  // If it is not a variable, Sema decided to calculate the iteration count on
  // each iteration (e.g., it is foldable into a constant).
  if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();

  bool HasLastprivateClause = false;
  // Check pre-condition.
  {
    OMPLoopScope PreInitScope(*this, S);
    // Skip the entire loop if we don't meet the precondition.
    // If the condition constant folds and can be elided, avoid emitting the
    // whole loop.
    bool CondConstant;
    llvm::BasicBlock *ContBlock = nullptr;
    if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
      if (!CondConstant)
        return;
    } else {
      llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
      ContBlock = createBasicBlock("omp.precond.end");
      emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
                  getProfileCount(&S));
      EmitBlock(ThenBlock);
      incrementProfileCounter(&S);
    }

    emitAlignedClause(*this, S);
    // Emit 'then' code.
    {
      // Emit helper vars inits.

      LValue LB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedLowerBoundVariable()
                          : S.getLowerBoundVariable())));
      LValue UB = EmitOMPHelperVar(
          *this, cast<DeclRefExpr>(
                     (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                          ? S.getCombinedUpperBoundVariable()
                          : S.getUpperBoundVariable())));
      LValue ST =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
      LValue IL =
          EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));

      OMPPrivateScope LoopScope(*this);
      if (EmitOMPFirstprivateClause(S, LoopScope)) {
        // Emit implicit barrier to synchronize threads and avoid data races
        // on initialization of firstprivate variables and post-update of
        // lastprivate variables.
        CGM.getOpenMPRuntime().emitBarrierCall(
            *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
            /*ForceSimpleCall=*/true);
      }
      EmitOMPPrivateClause(S, LoopScope);
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind()))
        EmitOMPReductionClauseInit(S, LoopScope);
      HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
      EmitOMPPrivateLoopCounters(S, LoopScope);
      (void)LoopScope.Privatize();
      if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
        CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);

      // Detect the distribute schedule kind and chunk.
      llvm::Value *Chunk = nullptr;
      OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
      if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
        ScheduleKind = C->getDistScheduleKind();
        if (const Expr *Ch = C->getChunkSize()) {
          Chunk = EmitScalarExpr(Ch);
          Chunk = EmitScalarConversion(Chunk, Ch->getType(),
                                       S.getIterationVariable()->getType(),
                                       S.getBeginLoc());
        }
      } else {
        // Default behaviour for dist_schedule clause.
        CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
            *this, S, ScheduleKind, Chunk);
      }
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();

      // OpenMP [2.10.8, distribute Construct, Description]
      // If dist_schedule is specified, kind must be static. If specified,
      // iterations are divided into chunks of size chunk_size, chunks are
      // assigned to the teams of the league in a round-robin fashion in the
      // order of the team number. When no chunk_size is specified, the
      // iteration space is divided into chunks that are approximately equal
      // in size, and at most one chunk is distributed to each team of the
      // league. The size of the chunks is unspecified in this case.
      bool StaticChunked =
          RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) &&
          isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) ||
          StaticChunked) {
        CGOpenMPRuntime::StaticRTInput StaticInit(
            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            StaticChunked ? Chunk : nullptr);
        RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                    StaticInit);
        JumpDest LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedEnsureUpperBound()
                            : S.getEnsureUpperBound());
        // IV = LB;
        EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                            ? S.getCombinedInit()
                            : S.getInit());

        const Expr *Cond =
            isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
                ? S.getCombinedCond()
                : S.getCond();

        if (StaticChunked)
          Cond = S.getCombinedDistCond();

        // For static unchunked schedules generate:
        //
        //  1. For distribute alone, codegen
        //    while (idx <= UB) {
        //      BODY;
        //      ++idx;
        //    }
        //
        //  2. When combined with 'for' (e.g. as in 'distribute parallel for')
        //    while (idx <= UB) {
        //      <CodeGen rest of pragma>(LB, UB);
        //      idx += ST;
        //    }
        //
        // For static chunked schedules generate:
        //
        // while (IV <= GlobalUB) {
        //   <CodeGen rest of pragma>(LB, UB);
        //   LB += ST;
        //   UB += ST;
        //   UB = min(UB, GlobalUB);
        //   IV = LB;
        // }
        //
        emitCommonSimdLoop(
            *this, S,
            [&S](CodeGenFunction &CGF, PrePostActionTy &) {
              if (isOpenMPSimdDirective(S.getDirectiveKind()))
                CGF.EmitOMPSimdInit(S);
            },
            [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
             StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
              CGF.EmitOMPInnerLoop(
                  S, LoopScope.requiresCleanups(), Cond, IncExpr,
                  [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
                    CodeGenLoop(CGF, S, LoopExit);
                  },
                  [&S, StaticChunked](CodeGenFunction &CGF) {
                    if (StaticChunked) {
                      CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
                      CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
                      CGF.EmitIgnoredExpr(S.getCombinedInit());
                    }
                  });
            });
        EmitBlock(LoopExit.getBlock());
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getEndLoc(), S.getDirectiveKind());
      } else {
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        const OMPLoopArguments LoopArguments = {
            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
            IL.getAddress(*this), Chunk};
        EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                   CodeGenLoop);
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind())) {
        EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
          return CGF.Builder.CreateIsNotNull(
              CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
        });
      }
      if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
          !isOpenMPParallelDirective(S.getDirectiveKind()) &&
          !isOpenMPTeamsDirective(S.getDirectiveKind())) {
        EmitOMPReductionClauseFinal(S, OMPD_simd);
        // Emit post-update of the reduction variables if IsLastIter != 0.
        emitPostUpdateForReductionClause(
            *this, S, [IL, &S](CodeGenFunction &CGF) {
              return CGF.Builder.CreateIsNotNull(
                  CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
            });
      }
      // Emit final copy of the lastprivate variables if IsLastIter != 0.
      if (HasLastprivateClause) {
        EmitOMPLastprivateClauseFinal(
            S, /*NoFinals=*/false,
            Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
      }
    }

    // We're now done with the loop, so jump to the continuation block.
    if (ContBlock) {
      EmitBranch(ContBlock);
      EmitBlock(ContBlock, true);
    }
  }
}
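
// For illustration: the dist_schedule clause selects the branch taken above,
// e.g.
//   #pragma omp distribute dist_schedule(static)       // static init path
//   #pragma omp distribute dist_schedule(static, 128)  // outer-loop path
// With no clause, getDefaultDistScheduleAndChunk supplies a target-dependent
// default schedule and chunk.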

void CodeGenFunction::EmitOMPDistributeDirective(
    const OMPDistributeDirective &S) {
  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
    CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
}

static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
                                                   const CapturedStmt *S,
                                                   SourceLocation Loc) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
  CGF.CapturedStmtInfo = &CapStmtInfo;
  llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
  Fn->setDoesNotRecurse();
  return Fn;
}

void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
  if (CGM.getLangOpts().OpenMPIRBuilder) {
    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    if (S.hasClausesOfKind<OMPDependClause>()) {
      // The ordered directive with depend clause.
      assert(!S.hasAssociatedStmt() &&
             "No associated statement must be in ordered depend construct.");
      InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
                             AllocaInsertPt->getIterator());
      for (const auto *DC : S.getClausesOfKind<OMPDependClause>()) {
        unsigned NumLoops = DC->getNumLoops();
        QualType Int64Ty = CGM.getContext().getIntTypeForBitwidth(
            /*DestWidth=*/64, /*Signed=*/1);
        llvm::SmallVector<llvm::Value *> StoreValues;
        for (unsigned I = 0; I < NumLoops; I++) {
          const Expr *CounterVal = DC->getLoopData(I);
          assert(CounterVal);
          llvm::Value *StoreValue = EmitScalarConversion(
              EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
              CounterVal->getExprLoc());
          StoreValues.emplace_back(StoreValue);
        }
        bool IsDependSource = false;
        if (DC->getDependencyKind() == OMPC_DEPEND_source)
          IsDependSource = true;
        Builder.restoreIP(OMPBuilder.createOrderedDepend(
            Builder, AllocaIP, NumLoops, StoreValues, ".cnt.addr",
            IsDependSource));
      }
    } else {
      // The ordered directive with a threads or simd clause, or with no
      // clause; without a clause it behaves as if the threads clause were
      // specified.
      const auto *C = S.getSingleClause<OMPSIMDClause>();

      auto FiniCB = [this](InsertPointTy IP) {
        OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
      };

      auto BodyGenCB = [&S, C, this](InsertPointTy AllocaIP,
                                     InsertPointTy CodeGenIP) {
        Builder.restoreIP(CodeGenIP);

        const CapturedStmt *CS = S.getInnermostCapturedStmt();
        if (C) {
          llvm::BasicBlock *FiniBB = splitBBWithSuffix(
              Builder, /*CreateBranch=*/false, ".ordered.after");
          llvm::SmallVector<llvm::Value *, 16> CapturedVars;
          GenerateOpenMPCapturedVars(*CS, CapturedVars);
          llvm::Function *OutlinedFn =
              emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
          assert(S.getBeginLoc().isValid() &&
                 "Outlined function call location must be valid.");
          ApplyDebugLocation::CreateDefaultArtificial(*this, S.getBeginLoc());
          OMPBuilderCBHelpers::EmitCaptureStmt(*this, CodeGenIP, *FiniBB,
                                               OutlinedFn, CapturedVars);
        } else {
          OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
              *this, CS->getCapturedStmt(), AllocaIP, CodeGenIP, "ordered");
        }
      };

      OMPLexicalScope Scope(*this, S, OMPD_unknown);
      Builder.restoreIP(
          OMPBuilder.createOrderedThreadsSimd(Builder, BodyGenCB, FiniCB, !C));
    }
    return;
  }

  if (S.hasClausesOfKind<OMPDependClause>()) {
    assert(!S.hasAssociatedStmt() &&
           "No associated statement must be in ordered depend construct.");
    for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
      CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
    return;
  }
  const auto *C = S.getSingleClause<OMPSIMDClause>();
  auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
                                 PrePostActionTy &Action) {
    const CapturedStmt *CS = S.getInnermostCapturedStmt();
    if (C) {
      llvm::SmallVector<llvm::Value *, 16> CapturedVars;
      CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
      llvm::Function *OutlinedFn =
          emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
      CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
                                                      OutlinedFn, CapturedVars);
    } else {
      Action.Enter(CGF);
      CGF.EmitStmt(CS->getCapturedStmt());
    }
  };
  OMPLexicalScope Scope(*this, S, OMPD_unknown);
  CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
}
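
// For illustration: in a doacross loop nest such as
//   #pragma omp for ordered(1)
//   for (int i = 1; i < n; ++i) {
//     #pragma omp ordered depend(sink: i - 1)
//     a[i] = a[i - 1] + 1;
//     #pragma omp ordered depend(source)
//   }
// each depend form is lowered through emitDoacrossOrdered (or
// createOrderedDepend on the IRBuilder path) to runtime doacross calls, e.g.
// __kmpc_doacross_wait for 'sink' and __kmpc_doacross_post for 'source'.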

static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
                                         QualType SrcType, QualType DestType,
                                         SourceLocation Loc) {
  assert(CGF.hasScalarEvaluationKind(DestType) &&
         "DestType must have scalar evaluation kind.");
  assert(!Val.isAggregate() && "Must be a scalar or complex.");
  return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
                                                   DestType, Loc)
                        : CGF.EmitComplexToScalarConversion(
                              Val.getComplexVal(), SrcType, DestType, Loc);
}

static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
                      QualType DestType, SourceLocation Loc) {
  assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
         "DestType must have complex evaluation kind.");
  CodeGenFunction::ComplexPairTy ComplexVal;
  if (Val.isScalar()) {
    // Convert the input element to the element type of the complex.
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    llvm::Value *ScalarVal = CGF.EmitScalarConversion(
        Val.getScalarVal(), SrcType, DestElementType, Loc);
    ComplexVal = CodeGenFunction::ComplexPairTy(
        ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
  } else {
    assert(Val.isComplex() && "Must be a scalar or complex.");
    QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
    QualType DestElementType =
        DestType->castAs<ComplexType>()->getElementType();
    ComplexVal.first = CGF.EmitScalarConversion(
        Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
    ComplexVal.second = CGF.EmitScalarConversion(
        Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
  }
  return ComplexVal;
}

static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
                                  LValue LVal, RValue RVal) {
  if (LVal.isGlobalReg())
    CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
  else
    CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
}

static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, LValue LVal,
                                   SourceLocation Loc) {
  if (LVal.isGlobalReg())
    return CGF.EmitLoadOfLValue(LVal, Loc);
  return CGF.EmitAtomicLoad(
      LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
      LVal.isVolatile());
}

void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
                                         QualType RValTy, SourceLocation Loc) {
  switch (getEvaluationKind(LVal.getType())) {
  case TEK_Scalar:
    EmitStoreThroughLValue(RValue::get(convertToScalarValue(
                               *this, RVal, RValTy, LVal.getType(), Loc)),
                           LVal);
    break;
  case TEK_Complex:
    EmitStoreOfComplex(
        convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}

static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
  // OpenMP, 2.17.7, atomic Construct
  // If the read or capture clause is specified and the acquire, acq_rel, or
  // seq_cst clause is specified then the strong flush on exit from the atomic
  // operation is also an acquire flush.
  switch (AO) {
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Acquire);
    break;
  case llvm::AtomicOrdering::Monotonic:
  case llvm::AtomicOrdering::Release:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
  CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
}
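
// For illustration:
//   #pragma omp atomic read acquire
//   v = x;
// emits an atomic load of 'x' followed by the acquire flush required by the
// spec wording quoted above, and then a plain store of the result into 'v'.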

static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
                                   llvm::AtomicOrdering AO, const Expr *X,
                                   const Expr *E, SourceLocation Loc) {
  // x = expr;
  assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
  emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
                                                RValue Update,
                                                BinaryOperatorKind BO,
                                                llvm::AtomicOrdering AO,
                                                bool IsXLHSInRHSPart) {
  ASTContext &Context = CGF.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
  // expression is simple and atomic is allowed for the given type for the
  // target platform.
  if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
      (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
       (Update.getScalarVal()->getType() !=
        X.getAddress(CGF).getElementType())) ||
      !Context.getTargetInfo().hasBuiltinAtomic(
          Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
    return std::make_pair(false, RValue::get(nullptr));

  auto &&CheckAtomicSupport = [&CGF](llvm::Type *T, BinaryOperatorKind BO) {
    if (T->isIntegerTy())
      return true;

    if (T->isFloatingPointTy() && (BO == BO_Add || BO == BO_Sub))
      return llvm::isPowerOf2_64(CGF.CGM.getDataLayout().getTypeStoreSize(T));

    return false;
  };

  if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
      !CheckAtomicSupport(X.getAddress(CGF).getElementType(), BO))
    return std::make_pair(false, RValue::get(nullptr));

  bool IsInteger = X.getAddress(CGF).getElementType()->isIntegerTy();
  llvm::AtomicRMWInst::BinOp RMWOp;
  switch (BO) {
  case BO_Add:
    RMWOp = IsInteger ? llvm::AtomicRMWInst::Add : llvm::AtomicRMWInst::FAdd;
    break;
  case BO_Sub:
    if (!IsXLHSInRHSPart)
      return std::make_pair(false, RValue::get(nullptr));
    RMWOp = IsInteger ? llvm::AtomicRMWInst::Sub : llvm::AtomicRMWInst::FSub;
    break;
  case BO_And:
    RMWOp = llvm::AtomicRMWInst::And;
    break;
  case BO_Or:
    RMWOp = llvm::AtomicRMWInst::Or;
    break;
  case BO_Xor:
    RMWOp = llvm::AtomicRMWInst::Xor;
    break;
  case BO_LT:
    if (IsInteger)
      RMWOp = X.getType()->hasSignedIntegerRepresentation()
                  ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
                                     : llvm::AtomicRMWInst::Max)
                  : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
                                     : llvm::AtomicRMWInst::UMax);
    else
      RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMin
                              : llvm::AtomicRMWInst::FMax;
    break;
  case BO_GT:
    if (IsInteger)
      RMWOp = X.getType()->hasSignedIntegerRepresentation()
                  ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
                                     : llvm::AtomicRMWInst::Min)
                  : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
                                     : llvm::AtomicRMWInst::UMin);
    else
      RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMax
                              : llvm::AtomicRMWInst::FMin;
    break;
  case BO_Assign:
    RMWOp = llvm::AtomicRMWInst::Xchg;
    break;
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Shl:
  case BO_Shr:
  case BO_LAnd:
  case BO_LOr:
    return std::make_pair(false, RValue::get(nullptr));
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_AddAssign:
  case BO_SubAssign:
  case BO_AndAssign:
  case BO_OrAssign:
  case BO_XorAssign:
  case BO_MulAssign:
  case BO_DivAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_Comma:
    llvm_unreachable("Unsupported atomic update operation");
  }
  llvm::Value *UpdateVal = Update.getScalarVal();
  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
    if (IsInteger)
      UpdateVal = CGF.Builder.CreateIntCast(
          IC, X.getAddress(CGF).getElementType(),
          X.getType()->hasSignedIntegerRepresentation());
    else
      UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
                                         X.getAddress(CGF).getElementType());
  }
  llvm::Value *Res =
      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
  return std::make_pair(true, RValue::get(Res));
}
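
// For illustration: when the update fits the constraints above,
//   #pragma omp atomic
//   x += expr;
// becomes a single 'atomicrmw add' ('fadd' for floating-point 'x'), and the
// relational forms (BO_LT/BO_GT) select the min/max atomicrmw flavours,
// avoiding the generic compare-and-swap fallback used otherwise.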

std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
    llvm::AtomicOrdering AO, SourceLocation Loc,
    const llvm::function_ref<RValue(RValue)> CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr Op x; -> expr binop xrval;
  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
  if (!Res.first) {
    if (X.isGlobalReg()) {
      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
      // 'xrval'.
      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
    } else {
      // Perform compare-and-swap procedure.
      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
    }
  }
  return Res;
}

static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
                                    llvm::AtomicOrdering AO, const Expr *X,
                                    const Expr *E, const Expr *UE,
                                    bool IsXLHSInRHSPart, SourceLocation Loc) {
  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
         "Update expr in 'atomic update' must be a binary operator.");
  const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr Op x; -> expr binop xrval;
  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
  const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
  const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
  const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
  auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
    CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
    CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
    return CGF.EmitAnyExpr(UE);
  };
  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
  // OpenMP, 2.17.7, atomic Construct
  // If the write, update, or capture clause is specified and the release,
  // acq_rel, or seq_cst clause is specified then the strong flush on entry to
  // the atomic operation is also a release flush.
  switch (AO) {
  case llvm::AtomicOrdering::Release:
  case llvm::AtomicOrdering::AcquireRelease:
  case llvm::AtomicOrdering::SequentiallyConsistent:
    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                         llvm::AtomicOrdering::Release);
    break;
  case llvm::AtomicOrdering::Acquire:
  case llvm::AtomicOrdering::Monotonic:
    break;
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Unordered:
    llvm_unreachable("Unexpected ordering.");
  }
}

static RValue convertToType(CodeGenFunction &CGF, RValue Value,
                            QualType SourceType, QualType ResType,
                            SourceLocation Loc) {
  switch (CGF.getEvaluationKind(ResType)) {
  case TEK_Scalar:
    return RValue::get(
        convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
  case TEK_Complex: {
    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
    return RValue::getComplex(Res.first, Res.second);
  }
  case TEK_Aggregate:
    break;
  }
  llvm_unreachable("Must be a scalar or complex.");
}

static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
                                     llvm::AtomicOrdering AO,
                                     bool IsPostfixUpdate, const Expr *V,
                                     const Expr *X, const Expr *E,
                                     const Expr *UE, bool IsXLHSInRHSPart,
                                     SourceLocation Loc) {
  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
  RValue NewVVal;
  LValue VLValue = CGF.EmitLValue(V);
  LValue XLValue = CGF.EmitLValue(X);
  RValue ExprRValue = CGF.EmitAnyExpr(E);
  QualType NewVValType;
  if (UE) {
    // 'x' is updated with some additional value.
    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
           "Update expr in 'atomic capture' must be a binary operator.");
    const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr;
    // x = expr Op x; -> expr binop xrval;
    const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
    const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
    const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
    NewVValType = XRValExpr->getType();
    const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
                  IsPostfixUpdate](RValue XRValue) {
      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
      RValue Res = CGF.EmitAnyExpr(UE);
      NewVVal = IsPostfixUpdate ? XRValue : Res;
      return Res;
    };
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      if (IsPostfixUpdate) {
        // Use old value from 'atomicrmw'.
        NewVVal = Res.second;
      } else {
        // 'atomicrmw' does not provide new value, so evaluate it using old
        // value of 'x'.
        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
        NewVVal = CGF.EmitAnyExpr(UE);
      }
    }
  } else {
    // 'x' is simply rewritten with some 'expr'.
    NewVValType = X->getType().getNonReferenceType();
    ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
                               X->getType().getNonReferenceType(), Loc);
    auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
      NewVVal = XRValue;
      return ExprRValue;
    };
    // Try to perform atomicrmw xchg, otherwise simple exchange.
    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
        XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
        Loc, Gen);
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
    if (Res.first) {
      // 'atomicrmw' instruction was generated.
      NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
    }
  }
  // Emit post-update store to 'v' of old/new 'x' value.
  CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
  CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
  // OpenMP 5.1 removes the required flush for capture clause.
  if (CGF.CGM.getLangOpts().OpenMP < 51) {
    // OpenMP, 2.17.7, atomic Construct
    // If the write, update, or capture clause is specified and the release,
    // acq_rel, or seq_cst clause is specified then the strong flush on entry to
    // the atomic operation is also a release flush.
    // If the read or capture clause is specified and the acquire, acq_rel, or
    // seq_cst clause is specified then the strong flush on exit from the atomic
    // operation is also an acquire flush.
    switch (AO) {
    case llvm::AtomicOrdering::Release:
      CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                           llvm::AtomicOrdering::Release);
      break;
    case llvm::AtomicOrdering::Acquire:
      CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
                                           llvm::AtomicOrdering::Acquire);
      break;
    case llvm::AtomicOrdering::AcquireRelease:
    case llvm::AtomicOrdering::SequentiallyConsistent:
      CGF.CGM.getOpenMPRuntime().emitFlush(
          CGF, llvm::None, Loc, llvm::AtomicOrdering::AcquireRelease);
      break;
    case llvm::AtomicOrdering::Monotonic:
      break;
    case llvm::AtomicOrdering::NotAtomic:
    case llvm::AtomicOrdering::Unordered:
      llvm_unreachable("Unexpected ordering.");
    }
  }
}
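
// For illustration:
//   #pragma omp atomic capture
//   v = x++;
// is a postfix update: if an 'atomicrmw' was generated, its returned (old)
// value is stored to 'v'; for the prefix form (v = ++x;) the update expression
// is re-evaluated over the old value to compute the value stored to 'v'.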

static void emitOMPAtomicCompareExpr(CodeGenFunction &CGF,
                                     llvm::AtomicOrdering AO, const Expr *X,
                                     const Expr *V, const Expr *R,
                                     const Expr *E, const Expr *D,
                                     const Expr *CE, bool IsXBinopExpr,
                                     bool IsPostfixUpdate, bool IsFailOnly,
                                     SourceLocation Loc) {
  llvm::OpenMPIRBuilder &OMPBuilder =
      CGF.CGM.getOpenMPRuntime().getOMPBuilder();

  OMPAtomicCompareOp Op;
  assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator");
  switch (cast<BinaryOperator>(CE)->getOpcode()) {
  case BO_EQ:
    Op = OMPAtomicCompareOp::EQ;
    break;
  case BO_LT:
    Op = OMPAtomicCompareOp::MIN;
    break;
  case BO_GT:
    Op = OMPAtomicCompareOp::MAX;
    break;
  default:
    llvm_unreachable("unsupported atomic compare binary operator");
  }

  LValue XLVal = CGF.EmitLValue(X);
  Address XAddr = XLVal.getAddress(CGF);

  auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
    if (X->getType() == E->getType())
      return CGF.EmitScalarExpr(E);
    const Expr *NewE = E->IgnoreImplicitAsWritten();
    llvm::Value *V = CGF.EmitScalarExpr(NewE);
    if (NewE->getType() == X->getType())
      return V;
    return CGF.EmitScalarConversion(V, NewE->getType(), X->getType(), Loc);
  };

  llvm::Value *EVal = EmitRValueWithCastIfNeeded(X, E);
  llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
  if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
    EVal = CGF.Builder.CreateIntCast(
        CI, XLVal.getAddress(CGF).getElementType(),
        E->getType()->hasSignedIntegerRepresentation());
  if (DVal)
    if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
      DVal = CGF.Builder.CreateIntCast(
          CI, XLVal.getAddress(CGF).getElementType(),
          D->getType()->hasSignedIntegerRepresentation());

  llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
      XAddr.getPointer(), XAddr.getElementType(),
      X->getType()->hasSignedIntegerRepresentation(),
      X->getType().isVolatileQualified()};
  llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
  if (V) {
    LValue LV = CGF.EmitLValue(V);
    Address Addr = LV.getAddress(CGF);
    VOpVal = {Addr.getPointer(), Addr.getElementType(),
              V->getType()->hasSignedIntegerRepresentation(),
              V->getType().isVolatileQualified()};
  }
  if (R) {
    LValue LV = CGF.EmitLValue(R);
    Address Addr = LV.getAddress(CGF);
    ROpVal = {Addr.getPointer(), Addr.getElementType(),
              R->getType()->hasSignedIntegerRepresentation(),
              R->getType().isVolatileQualified()};
  }

  CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
      CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
      IsPostfixUpdate, IsFailOnly));
}
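
// For illustration:
//   #pragma omp atomic compare
//   if (x == e) { x = d; }
// takes the EQ path above and is lowered by the OpenMPIRBuilder to a cmpxchg,
// while the relational conditional forms take the MIN/MAX paths (BO_LT -> MIN,
// BO_GT -> MAX) and can be emitted as atomicrmw min/max operations.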

static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
                              llvm::AtomicOrdering AO, bool IsPostfixUpdate,
                              const Expr *X, const Expr *V, const Expr *R,
                              const Expr *E, const Expr *UE, const Expr *D,
                              const Expr *CE, bool IsXLHSInRHSPart,
                              bool IsFailOnly, SourceLocation Loc) {
  switch (Kind) {
  case OMPC_read:
    emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
    break;
  case OMPC_write:
    emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
    break;
  case OMPC_unknown:
  case OMPC_update:
    emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
    break;
  case OMPC_capture:
    emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
                             IsXLHSInRHSPart, Loc);
    break;
  case OMPC_compare: {
    emitOMPAtomicCompareExpr(CGF, AO, X, V, R, E, D, CE, IsXLHSInRHSPart,
                             IsPostfixUpdate, IsFailOnly, Loc);
    break;
  }
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_reduction:
  case OMPC_task_reduction:
  case OMPC_in_reduction:
  case OMPC_safelen:
  case OMPC_simdlen:
  case OMPC_sizes:
  case OMPC_full:
  case OMPC_partial:
  case OMPC_allocator:
  case OMPC_allocate:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_seq_cst:
  case OMPC_acq_rel:
  case OMPC_acquire:
  case OMPC_release:
  case OMPC_relaxed:
  case OMPC_shared:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_flush:
  case OMPC_depobj:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_threadprivate:
  case OMPC_depend:
  case OMPC_mergeable:
  case OMPC_device:
  case OMPC_threads:
  case OMPC_simd:
  case OMPC_map:
  case OMPC_num_teams:
  case OMPC_thread_limit:
  case OMPC_priority:
  case OMPC_grainsize:
  case OMPC_nogroup:
  case OMPC_num_tasks:
  case OMPC_hint:
  case OMPC_dist_schedule:
  case OMPC_defaultmap:
  case OMPC_uniform:
  case OMPC_to:
  case OMPC_from:
  case OMPC_use_device_ptr:
  case OMPC_use_device_addr:
  case OMPC_is_device_ptr:
  case OMPC_has_device_addr:
  case OMPC_unified_address:
  case OMPC_unified_shared_memory:
  case OMPC_reverse_offload:
  case OMPC_dynamic_allocators:
  case OMPC_atomic_default_mem_order:
  case OMPC_device_type:
  case OMPC_match:
  case OMPC_nontemporal:
  case OMPC_order:
  case OMPC_destroy:
  case OMPC_detach:
  case OMPC_inclusive:
  case OMPC_exclusive:
  case OMPC_uses_allocators:
  case OMPC_affinity:
  case OMPC_init:
  case OMPC_inbranch:
  case OMPC_notinbranch:
  case OMPC_link:
  case OMPC_indirect:
  case OMPC_use:
  case OMPC_novariants:
  case OMPC_nocontext:
  case OMPC_filter:
  case OMPC_when:
  case OMPC_adjust_args:
  case OMPC_append_args:
  case OMPC_memory_order:
  case OMPC_bind:
  case OMPC_align:
  case OMPC_cancellation_construct_type:
    llvm_unreachable("Clause is not allowed in 'omp atomic'.");
  }
}

void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
  llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
  bool MemOrderingSpecified = false;
  if (S.getSingleClause<OMPSeqCstClause>()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcqRelClause>()) {
    AO = llvm::AtomicOrdering::AcquireRelease;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPAcquireClause>()) {
    AO = llvm::AtomicOrdering::Acquire;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPReleaseClause>()) {
    AO = llvm::AtomicOrdering::Release;
    MemOrderingSpecified = true;
  } else if (S.getSingleClause<OMPRelaxedClause>()) {
    AO = llvm::AtomicOrdering::Monotonic;
    MemOrderingSpecified = true;
  }
  llvm::SmallSet<OpenMPClauseKind, 2> KindsEncountered;
  OpenMPClauseKind Kind = OMPC_unknown;
  for (const OMPClause *C : S.clauses()) {
    // Find first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause,
    // if it is first).
    OpenMPClauseKind K = C->getClauseKind();
    if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire ||
        K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint)
      continue;
    Kind = K;
    KindsEncountered.insert(K);
  }
  // We just need to correct Kind here. No need to set a bool saying it is
  // actually compare capture because we can tell from whether V and R are
  // nullptr.
  if (KindsEncountered.contains(OMPC_compare) &&
      KindsEncountered.contains(OMPC_capture))
    Kind = OMPC_compare;
  if (!MemOrderingSpecified) {
    llvm::AtomicOrdering DefaultOrder =
        CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
    if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
        DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
        (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
         Kind == OMPC_capture)) {
      AO = DefaultOrder;
    } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
      if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
        AO = llvm::AtomicOrdering::Release;
      } else if (Kind == OMPC_read) {
        assert(Kind == OMPC_read && "Unexpected atomic kind.");
        AO = llvm::AtomicOrdering::Acquire;
      }
    }
  }

  LexicalScope Scope(*this, S.getSourceRange());
  EmitStopPoint(S.getAssociatedStmt());
  emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
                    S.getR(), S.getExpr(), S.getUpdateExpr(), S.getD(),
                    S.getCondExpr(), S.isXLHSInRHSPart(), S.isFailOnly(),
                    S.getBeginLoc());
}
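
// For illustration: with
//   #pragma omp requires atomic_default_mem_order(acq_rel)
// an unadorned '#pragma omp atomic read' defaults to acquire, write/update to
// release, and capture to acq_rel, per the defaulting logic above; absent a
// requires directive the default ordering is typically monotonic (relaxed).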

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen) {
  assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
  CodeGenModule &CGM = CGF.CGM;

  // On device emit this construct as inlined code.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    OMPLexicalScope Scope(CGF, S, OMPD_target);
    CGM.getOpenMPRuntime().emitInlinedDirective(
        CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
          CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
        });
    return;
  }

  auto LPCRegion =
      CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  const Expr *IfCond = nullptr;
  // Check for the (at most one) if clause associated with the target region.
6575 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
6576 if (C->getNameModifier() == OMPD_unknown ||
6577 C->getNameModifier() == OMPD_target) {
6578 IfCond = C->getCondition();
6579 break;
6580 }
6581 }
6582
6583 // Check if we have any device clause associated with the directive.
6584 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
6585 nullptr, OMPC_DEVICE_unknown);
6586 if (auto *C = S.getSingleClause<OMPDeviceClause>())
6587 Device.setPointerAndInt(C->getDevice(), C->getModifier());
6588
6589 // Check if we have an if clause whose conditional always evaluates to false
6590 // or if we do not have any targets specified. If so the target region is not
6591 // an offload entry point.
6592 bool IsOffloadEntry = true;
6593 if (IfCond) {
6594 bool Val;
6595 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
6596 IsOffloadEntry = false;
6597 }
6598 if (CGM.getLangOpts().OMPTargetTriples.empty())
6599 IsOffloadEntry = false;
6600
6601 if (CGM.getLangOpts().OpenMPOffloadMandatory && !IsOffloadEntry) {
6602 unsigned DiagID = CGM.getDiags().getCustomDiagID(
6603 DiagnosticsEngine::Error,
6604 "No offloading entry generated while offloading is mandatory.");
6605 CGM.getDiags().Report(DiagID);
6606 }
6607
6608 assert(CGF.CurFuncDecl && "No parent declaration for target region!");
6609 StringRef ParentName;
6610 // In case we have Ctors/Dtors we use the complete type variant to produce
6611 // the mangling of the device outlined kernel.
6612 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
6613 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
6614 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
6615 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
6616 else
6617 ParentName =
6618 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
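// Added note (illustrative): keying on the complete-object constructor or
// destructor keeps the parent name deterministic, so host and device compiles
// derive the same kernel name for, e.g., a target region inside 'Foo::Foo()'.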
6619
6620 // Emit target region as a standalone region.
6621 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
6622 IsOffloadEntry, CodeGen);
6623 OMPLexicalScope Scope(CGF, S, OMPD_task);
6624 auto &&SizeEmitter =
6625 [IsOffloadEntry](CodeGenFunction &CGF,
6626 const OMPLoopDirective &D) -> llvm::Value * {
6627 if (IsOffloadEntry) {
6628 OMPLoopScope(CGF, D);
6629 // Emit calculation of the iterations count.
6630 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
6631 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
6632 /*isSigned=*/false);
6633 return NumIterations;
6634 }
6635 return nullptr;
6636 };
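// Added note: for offload entries the lambda above computes the loop trip
// count (widened to i64) so emitTargetCall can pass it to the runtime
// (typically via a push-tripcount entry point); otherwise it returns nullptr
// and no trip count is communicated.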
6637 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
6638 SizeEmitter);
6639 }
6640
6641 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
6642 PrePostActionTy &Action) {
6643 Action.Enter(CGF);
6644 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
6645 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
6646 CGF.EmitOMPPrivateClause(S, PrivateScope);
6647 (void)PrivateScope.Privatize();
6648 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
6649 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
6650
6651 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
6652 CGF.EnsureInsertPoint();
6653 }
6654
6655 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
6656 StringRef ParentName,
6657 const OMPTargetDirective &S) {
6658 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6659 emitTargetRegion(CGF, S, Action);
6660 };
6661 llvm::Function *Fn;
6662 llvm::Constant *Addr;
6663 // Emit target region as a standalone region.
6664 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6665 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6666 assert(Fn && Addr && "Target device function emission failed.");
6667 }
6668
6669 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
6670 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6671 emitTargetRegion(CGF, S, Action);
6672 };
6673 emitCommonOMPTargetDirective(*this, S, CodeGen);
6674 }
6675
6676 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
6677 const OMPExecutableDirective &S,
6678 OpenMPDirectiveKind InnermostKind,
6679 const RegionCodeGenTy &CodeGen) {
6680 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
6681 llvm::Function *OutlinedFn =
6682 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
6683 S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
6684
6685 const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
6686 const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
6687 if (NT || TL) {
6688 const Expr *NumTeams = NT ? NT->getNumTeams() : nullptr;
6689 const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;
6690
6691 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
6692 S.getBeginLoc());
6693 }
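// Added example: for '#pragma omp teams num_teams(4) thread_limit(64)' both
// expressions reach emitNumTeamsClause, which typically lowers them to a
// single __kmpc_push_num_teams call ahead of the teams outlined function.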
6694
6695 OMPTeamsScope Scope(CGF, S);
6696 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
6697 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
6698 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
6699 CapturedVars);
6700 }
6701
6702 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
6703 // Emit teams region as a standalone region.
6704 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6705 Action.Enter(CGF);
6706 OMPPrivateScope PrivateScope(CGF);
6707 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
6708 CGF.EmitOMPPrivateClause(S, PrivateScope);
6709 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6710 (void)PrivateScope.Privatize();
6711 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
6712 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6713 };
6714 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
6715 emitPostUpdateForReductionClause(*this, S,
6716 [](CodeGenFunction &) { return nullptr; });
6717 }
6718
6719 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
6720 const OMPTargetTeamsDirective &S) {
6721 auto *CS = S.getCapturedStmt(OMPD_teams);
6722 Action.Enter(CGF);
6723 // Emit teams region as a standalone region.
6724 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
6725 Action.Enter(CGF);
6726 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
6727 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
6728 CGF.EmitOMPPrivateClause(S, PrivateScope);
6729 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6730 (void)PrivateScope.Privatize();
6731 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
6732 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
6733 CGF.EmitStmt(CS->getCapturedStmt());
6734 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6735 };
6736 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
6737 emitPostUpdateForReductionClause(CGF, S,
6738 [](CodeGenFunction &) { return nullptr; });
6739 }
6740
6741 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
6742 CodeGenModule &CGM, StringRef ParentName,
6743 const OMPTargetTeamsDirective &S) {
6744 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6745 emitTargetTeamsRegion(CGF, Action, S);
6746 };
6747 llvm::Function *Fn;
6748 llvm::Constant *Addr;
6749 // Emit target region as a standalone region.
6750 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6751 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6752 assert(Fn && Addr && "Target device function emission failed.");
6753 }
6754
6755 void CodeGenFunction::EmitOMPTargetTeamsDirective(
6756 const OMPTargetTeamsDirective &S) {
6757 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6758 emitTargetTeamsRegion(CGF, Action, S);
6759 };
6760 emitCommonOMPTargetDirective(*this, S, CodeGen);
6761 }
6762
6763 static void
6764 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
6765 const OMPTargetTeamsDistributeDirective &S) {
6766 Action.Enter(CGF);
6767 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6768 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
6769 };
6770
6771 // Emit teams region as a standalone region.
6772 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
6773 PrePostActionTy &Action) {
6774 Action.Enter(CGF);
6775 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
6776 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6777 (void)PrivateScope.Privatize();
6778 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
6779 CodeGenDistribute);
6780 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6781 };
6782 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
6783 emitPostUpdateForReductionClause(CGF, S,
6784 [](CodeGenFunction &) { return nullptr; });
6785 }
6786
6787 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
6788 CodeGenModule &CGM, StringRef ParentName,
6789 const OMPTargetTeamsDistributeDirective &S) {
6790 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6791 emitTargetTeamsDistributeRegion(CGF, Action, S);
6792 };
6793 llvm::Function *Fn;
6794 llvm::Constant *Addr;
6795 // Emit target region as a standalone region.
6796 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6797 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6798 assert(Fn && Addr && "Target device function emission failed.");
6799 }
6800
6801 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
6802 const OMPTargetTeamsDistributeDirective &S) {
6803 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6804 emitTargetTeamsDistributeRegion(CGF, Action, S);
6805 };
6806 emitCommonOMPTargetDirective(*this, S, CodeGen);
6807 }
6808
6809 static void emitTargetTeamsDistributeSimdRegion(
6810 CodeGenFunction &CGF, PrePostActionTy &Action,
6811 const OMPTargetTeamsDistributeSimdDirective &S) {
6812 Action.Enter(CGF);
6813 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6814 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
6815 };
6816
6817 // Emit teams region as a standalone region.
6818 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
6819 PrePostActionTy &Action) {
6820 Action.Enter(CGF);
6821 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
6822 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6823 (void)PrivateScope.Privatize();
6824 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
6825 CodeGenDistribute);
6826 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6827 };
6828 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
6829 emitPostUpdateForReductionClause(CGF, S,
6830 [](CodeGenFunction &) { return nullptr; });
6831 }
6832
6833 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
6834 CodeGenModule &CGM, StringRef ParentName,
6835 const OMPTargetTeamsDistributeSimdDirective &S) {
6836 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6837 emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
6838 };
6839 llvm::Function *Fn;
6840 llvm::Constant *Addr;
6841 // Emit target region as a standalone region.
6842 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6843 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6844 assert(Fn && Addr && "Target device function emission failed.");
6845 }
6846
6847 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
6848 const OMPTargetTeamsDistributeSimdDirective &S) {
6849 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6850 emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
6851 };
6852 emitCommonOMPTargetDirective(*this, S, CodeGen);
6853 }
6854
6855 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
6856 const OMPTeamsDistributeDirective &S) {
6857
6858 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6859 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
6860 };
6861
6862 // Emit teams region as a standalone region.
6863 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
6864 PrePostActionTy &Action) {
6865 Action.Enter(CGF);
6866 OMPPrivateScope PrivateScope(CGF);
6867 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6868 (void)PrivateScope.Privatize();
6869 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
6870 CodeGenDistribute);
6871 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6872 };
6873 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
6874 emitPostUpdateForReductionClause(*this, S,
6875 [](CodeGenFunction &) { return nullptr; });
6876 }
6877
6878 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
6879 const OMPTeamsDistributeSimdDirective &S) {
6880 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6881 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
6882 };
6883
6884 // Emit teams region as a standalone region.
6885 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
6886 PrePostActionTy &Action) {
6887 Action.Enter(CGF);
6888 OMPPrivateScope PrivateScope(CGF);
6889 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6890 (void)PrivateScope.Privatize();
6891 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
6892 CodeGenDistribute);
6893 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6894 };
6895 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
6896 emitPostUpdateForReductionClause(*this, S,
6897 [](CodeGenFunction &) { return nullptr; });
6898 }
6899
6900 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
6901 const OMPTeamsDistributeParallelForDirective &S) {
6902 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6903 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
6904 S.getDistInc());
6905 };
6906
6907 // Emit teams region as a standalone region.
6908 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
6909 PrePostActionTy &Action) {
6910 Action.Enter(CGF);
6911 OMPPrivateScope PrivateScope(CGF);
6912 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6913 (void)PrivateScope.Privatize();
6914 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
6915 CodeGenDistribute);
6916 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6917 };
6918 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
6919 emitPostUpdateForReductionClause(*this, S,
6920 [](CodeGenFunction &) { return nullptr; });
6921 }
6922
6923 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
6924 const OMPTeamsDistributeParallelForSimdDirective &S) {
6925 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6926 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
6927 S.getDistInc());
6928 };
6929
6930 // Emit teams region as a standalone region.
6931 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
6932 PrePostActionTy &Action) {
6933 Action.Enter(CGF);
6934 OMPPrivateScope PrivateScope(CGF);
6935 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6936 (void)PrivateScope.Privatize();
6937 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
6938 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
6939 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6940 };
6941 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd,
6942 CodeGen);
6943 emitPostUpdateForReductionClause(*this, S,
6944 [](CodeGenFunction &) { return nullptr; });
6945 }
6946
6947 void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
6948 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
6949 llvm::Value *Device = nullptr;
6950 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
6951 Device = EmitScalarExpr(C->getDevice());
6952
6953 llvm::Value *NumDependences = nullptr;
6954 llvm::Value *DependenceAddress = nullptr;
6955 if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
6956 OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
6957 DC->getModifier());
6958 Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
6959 std::pair<llvm::Value *, Address> DependencePair =
6960 CGM.getOpenMPRuntime().emitDependClause(*this, Dependencies,
6961 DC->getBeginLoc());
6962 NumDependences = DependencePair.first;
6963 DependenceAddress = Builder.CreatePointerCast(
6964 DependencePair.second.getPointer(), CGM.Int8PtrTy);
6965 }
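// Added note: the depend clause is materialized as an ordinary task
// dependence array; only its element count and a pointer to its first
// element (cast to i8*) are forwarded to the interop entry points below.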
6966
6967 assert(!(S.hasClausesOfKind<OMPNowaitClause>() &&
6968 !(S.getSingleClause<OMPInitClause>() ||
6969 S.getSingleClause<OMPDestroyClause>() ||
6970 S.getSingleClause<OMPUseClause>())) &&
6971 "OMPNowaitClause clause is used separately in OMPInteropDirective.");
6972
6973 if (const auto *C = S.getSingleClause<OMPInitClause>()) {
6974 llvm::Value *InteropvarPtr =
6975 EmitLValue(C->getInteropVar()).getPointer(*this);
6976 llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown;
6977 if (C->getIsTarget()) {
6978 InteropType = llvm::omp::OMPInteropType::Target;
6979 } else {
6980 assert(C->getIsTargetSync() && "Expected interop-type target/targetsync");
6981 InteropType = llvm::omp::OMPInteropType::TargetSync;
6982 }
6983 OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
6984 NumDependences, DependenceAddress,
6985 S.hasClausesOfKind<OMPNowaitClause>());
6986 } else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
6987 llvm::Value *InteropvarPtr =
6988 EmitLValue(C->getInteropVar()).getPointer(*this);
6989 OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
6990 NumDependences, DependenceAddress,
6991 S.hasClausesOfKind<OMPNowaitClause>());
6992 } else if (const auto *C = S.getSingleClause<OMPUseClause>()) {
6993 llvm::Value *InteropvarPtr =
6994 EmitLValue(C->getInteropVar()).getPointer(*this);
6995 OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
6996 NumDependences, DependenceAddress,
6997 S.hasClausesOfKind<OMPNowaitClause>());
6998 }
6999 }
7000
7001 static void emitTargetTeamsDistributeParallelForRegion(
7002 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
7003 PrePostActionTy &Action) {
7004 Action.Enter(CGF);
7005 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7006 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
7007 S.getDistInc());
7008 };
7009
7010 // Emit teams region as a standalone region.
7011 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7012 PrePostActionTy &Action) {
7013 Action.Enter(CGF);
7014 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7015 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7016 (void)PrivateScope.Privatize();
7017 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
7018 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
7019 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7020 };
7021
7022 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
7023 CodeGenTeams);
7024 emitPostUpdateForReductionClause(CGF, S,
7025 [](CodeGenFunction &) { return nullptr; });
7026 }
7027
7028 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
7029 CodeGenModule &CGM, StringRef ParentName,
7030 const OMPTargetTeamsDistributeParallelForDirective &S) {
7031 // Emit SPMD target teams distribute parallel for region as a standalone
7032 // region.
7033 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7034 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
7035 };
7036 llvm::Function *Fn;
7037 llvm::Constant *Addr;
7038 // Emit target region as a standalone region.
7039 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7040 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7041 assert(Fn && Addr && "Target device function emission failed.");
7042 }
7043
7044 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
7045 const OMPTargetTeamsDistributeParallelForDirective &S) {
7046 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7047 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
7048 };
7049 emitCommonOMPTargetDirective(*this, S, CodeGen);
7050 }
7051
7052 static void emitTargetTeamsDistributeParallelForSimdRegion(
7053 CodeGenFunction &CGF,
7054 const OMPTargetTeamsDistributeParallelForSimdDirective &S,
7055 PrePostActionTy &Action) {
7056 Action.Enter(CGF);
7057 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7058 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
7059 S.getDistInc());
7060 };
7061
7062 // Emit teams region as a standalone region.
7063 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7064 PrePostActionTy &Action) {
7065 Action.Enter(CGF);
7066 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7067 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7068 (void)PrivateScope.Privatize();
7069 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
7070 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
7071 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7072 };
7073
7074 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
7075 CodeGenTeams);
7076 emitPostUpdateForReductionClause(CGF, S,
7077 [](CodeGenFunction &) { return nullptr; });
7078 }
7079
7080 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
7081 CodeGenModule &CGM, StringRef ParentName,
7082 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
7083 // Emit SPMD target teams distribute parallel for simd region as a standalone
7084 // region.
7085 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7086 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
7087 };
7088 llvm::Function *Fn;
7089 llvm::Constant *Addr;
7090 // Emit target region as a standalone region.
7091 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7092 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7093 assert(Fn && Addr && "Target device function emission failed.");
7094 }
7095
7096 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
7097 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
7098 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7099 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
7100 };
7101 emitCommonOMPTargetDirective(*this, S, CodeGen);
7102 }
7103
7104 void CodeGenFunction::EmitOMPCancellationPointDirective(
7105 const OMPCancellationPointDirective &S) {
7106 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
7107 S.getCancelRegion());
7108 }
7109
7110 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
7111 const Expr *IfCond = nullptr;
7112 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
7113 if (C->getNameModifier() == OMPD_unknown ||
7114 C->getNameModifier() == OMPD_cancel) {
7115 IfCond = C->getCondition();
7116 break;
7117 }
7118 }
7119 if (CGM.getLangOpts().OpenMPIRBuilder) {
7120 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
7121 // TODO: This check is necessary as we only generate `omp parallel` through
7122 // the OpenMPIRBuilder for now.
7123 if (S.getCancelRegion() == OMPD_parallel ||
7124 S.getCancelRegion() == OMPD_sections ||
7125 S.getCancelRegion() == OMPD_section) {
7126 llvm::Value *IfCondition = nullptr;
7127 if (IfCond)
7128 IfCondition = EmitScalarExpr(IfCond,
7129 /*IgnoreResultAssign=*/true);
7130 return Builder.restoreIP(
7131 OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion()));
7132 }
7133 }
7134
7135 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
7136 S.getCancelRegion());
7137 }
7138
7139 CodeGenFunction::JumpDest
7140 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
7141 if (Kind == OMPD_parallel || Kind == OMPD_task ||
7142 Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
7143 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
7144 return ReturnBlock;
7145 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
7146 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
7147 Kind == OMPD_distribute_parallel_for ||
7148 Kind == OMPD_target_parallel_for ||
7149 Kind == OMPD_teams_distribute_parallel_for ||
7150 Kind == OMPD_target_teams_distribute_parallel_for);
7151 return OMPCancelStack.getExitBlock();
7152 }
7153
7154 void CodeGenFunction::EmitOMPUseDevicePtrClause(
7155 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
7156 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
7157 auto OrigVarIt = C.varlist_begin();
7158 auto InitIt = C.inits().begin();
7159 for (const Expr *PvtVarIt : C.private_copies()) {
7160 const auto *OrigVD =
7161 cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
7162 const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
7163 const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
7164
7165 // In order to identify the right initializer we need to match the
7166 // declaration used by the mapping logic. In some cases we may get an
7167 // OMPCapturedExprDecl that refers to the original declaration.
7168 const ValueDecl *MatchingVD = OrigVD;
7169 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
7170 // OMPCapturedExprDecls are used to privatize fields of the current
7171 // structure.
7172 const auto *ME = cast<MemberExpr>(OED->getInit());
7173 assert(isa<CXXThisExpr>(ME->getBase()) &&
7174 "Base should be the current struct!");
7175 MatchingVD = ME->getMemberDecl();
7176 }
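// Added example: for 'use_device_ptr(Ptr)' naming a member inside a method,
// the clause carries an OMPCapturedExprDecl initialized from 'this->Ptr', so
// the lookup key becomes the member declaration rather than the capture.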
7177
7178 // If we don't have information about the current list item, move on to
7179 // the next one.
7180 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
7181 if (InitAddrIt == CaptureDeviceAddrMap.end())
7182 continue;
7183
7184 // Initialize the temporary initialization variable with the address
7185 // we get from the runtime library. We have to cast the source address
7186 // because it is always a void *. References are materialized in the
7187 // privatization scope, so the initialization here disregards the fact
7188 // the original variable is a reference.
7189 llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
7190 Address InitAddr = Builder.CreateElementBitCast(InitAddrIt->second, Ty);
7191 setAddrOfLocalVar(InitVD, InitAddr);
7192
7193 // Emit the private declaration; it will be initialized by the
7194 // declaration we just added to the local declarations map.
7195 EmitDecl(*PvtVD);
7196
7197 // The initialization variable has served its purpose in the emission of
7198 // the previous declaration, so we don't need it anymore.
7199 LocalDeclMap.erase(InitVD);
7200
7201 // Register the address of the private variable in the privatization scope.
7202 bool IsRegistered =
7203 PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(PvtVD));
7204 assert(IsRegistered && "firstprivate var already registered as private");
7205 // Silence the warning about unused variable.
7206 (void)IsRegistered;
7207
7208 ++OrigVarIt;
7209 ++InitIt;
7210 }
7211 }
7212
7213 static const VarDecl *getBaseDecl(const Expr *Ref) {
7214 const Expr *Base = Ref->IgnoreParenImpCasts();
7215 while (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Base))
7216 Base = OASE->getBase()->IgnoreParenImpCasts();
7217 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
7218 Base = ASE->getBase()->IgnoreParenImpCasts();
7219 return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
7220 }
7221
7222 void CodeGenFunction::EmitOMPUseDeviceAddrClause(
7223 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
7224 const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
7225 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
7226 for (const Expr *Ref : C.varlists()) {
7227 const VarDecl *OrigVD = getBaseDecl(Ref);
7228 if (!Processed.insert(OrigVD).second)
7229 continue;
7230 // In order to identify the right initializer we need to match the
7231 // declaration used by the mapping logic. In some cases we may get an
7232 // OMPCapturedExprDecl that refers to the original declaration.
7233 const ValueDecl *MatchingVD = OrigVD;
7234 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
7235 // OMPCapturedExprDecls are used to privatize fields of the current
7236 // structure.
7237 const auto *ME = cast<MemberExpr>(OED->getInit());
7238 assert(isa<CXXThisExpr>(ME->getBase()) &&
7239 "Base should be the current struct!");
7240 MatchingVD = ME->getMemberDecl();
7241 }
7242
7243 // If we don't have information about the current list item, move on to
7244 // the next one.
7245 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
7246 if (InitAddrIt == CaptureDeviceAddrMap.end())
7247 continue;
7248
7249 Address PrivAddr = InitAddrIt->getSecond();
7250 // For declrefs and variable-length arrays, we need to load the pointer
7251 // for correct mapping, since the pointer to the data was passed to the runtime.
7252 if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
7253 MatchingVD->getType()->isArrayType()) {
7254 QualType PtrTy = getContext().getPointerType(
7255 OrigVD->getType().getNonReferenceType());
7256 PrivAddr = EmitLoadOfPointer(
7257 Builder.CreateElementBitCast(PrivAddr, ConvertTypeForMem(PtrTy)),
7258 PtrTy->castAs<PointerType>());
7259 }
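// Added note: in that case the runtime handed back a slot holding the
// translated data pointer, so we read through the slot here; otherwise the
// slot itself is already the private address to use.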
7260
7261 (void)PrivateScope.addPrivate(OrigVD, PrivAddr);
7262 }
7263 }
7264
7265 // Generate the instructions for the '#pragma omp target data' directive.
7266 void CodeGenFunction::EmitOMPTargetDataDirective(
7267 const OMPTargetDataDirective &S) {
7268 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
7269 /*SeparateBeginEndCalls=*/true);
7270
7271 // Create a pre/post action to signal the privatization of the device pointer.
7272 // This action can be replaced by the OpenMP runtime code generation to
7273 // deactivate privatization.
7274 bool PrivatizeDevicePointers = false;
7275 class DevicePointerPrivActionTy : public PrePostActionTy {
7276 bool &PrivatizeDevicePointers;
7277
7278 public:
7279 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
7280 : PrivatizeDevicePointers(PrivatizeDevicePointers) {}
7281 void Enter(CodeGenFunction &CGF) override {
7282 PrivatizeDevicePointers = true;
7283 }
7284 };
7285 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
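// Added note: the device-pointer codegen in the runtime invokes Enter() on
// this action only when privatization is actually required; otherwise the
// flag stays false and the plain lexical-scope path below is taken.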
7286
7287 auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
7288 CodeGenFunction &CGF, PrePostActionTy &Action) {
7289 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7290 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
7291 };
7292
7293 // Codegen that selects whether to generate the privatization code or not.
7294 auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
7295 &InnermostCodeGen](CodeGenFunction &CGF,
7296 PrePostActionTy &Action) {
7297 RegionCodeGenTy RCG(InnermostCodeGen);
7298 PrivatizeDevicePointers = false;
7299
7300 // Call the pre-action to change the status of PrivatizeDevicePointers if
7301 // needed.
7302 Action.Enter(CGF);
7303
7304 if (PrivatizeDevicePointers) {
7305 OMPPrivateScope PrivateScope(CGF);
7306 // Emit all instances of the use_device_ptr clause.
7307 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
7308 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
7309 Info.CaptureDeviceAddrMap);
7310 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
7311 CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
7312 Info.CaptureDeviceAddrMap);
7313 (void)PrivateScope.Privatize();
7314 RCG(CGF);
7315 } else {
7316 OMPLexicalScope Scope(CGF, S, OMPD_unknown);
7317 RCG(CGF);
7318 }
7319 };
7320
7321 // Forward the provided action to the privatization codegen.
7322 RegionCodeGenTy PrivRCG(PrivCodeGen);
7323 PrivRCG.setAction(Action);
7324
7325 // Even though the body of the region is emitted as an inlined directive,
7326 // we don't use an inline scope: changes to the references inside the
7327 // region are expected to be visible outside, so we do not privatize them.
7328 OMPLexicalScope Scope(CGF, S);
7329 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
7330 PrivRCG);
7331 };
7332
7333 RegionCodeGenTy RCG(CodeGen);
7334
7335 // If we don't have target devices, don't bother emitting the data mapping
7336 // code.
7337 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
7338 RCG(*this);
7339 return;
7340 }
7341
7342 // Check if we have any if clause associated with the directive.
7343 const Expr *IfCond = nullptr;
7344 if (const auto *C = S.getSingleClause<OMPIfClause>())
7345 IfCond = C->getCondition();
7346
7347 // Check if we have any device clause associated with the directive.
7348 const Expr *Device = nullptr;
7349 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7350 Device = C->getDevice();
7351
7352 // Set the action to signal privatization of device pointers.
7353 RCG.setAction(PrivAction);
7354
7355 // Emit region code.
7356 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
7357 Info);
7358 }
7359
7360 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
7361 const OMPTargetEnterDataDirective &S) {
7362 // If we don't have target devices, don't bother emitting the data mapping
7363 // code.
7364 if (CGM.getLangOpts().OMPTargetTriples.empty())
7365 return;
7366
7367 // Check if we have any if clause associated with the directive.
7368 const Expr *IfCond = nullptr;
7369 if (const auto *C = S.getSingleClause<OMPIfClause>())
7370 IfCond = C->getCondition();
7371
7372 // Check if we have any device clause associated with the directive.
7373 const Expr *Device = nullptr;
7374 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7375 Device = C->getDevice();
7376
7377 OMPLexicalScope Scope(*this, S, OMPD_task);
7378 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
7379 }
7380
7381 void CodeGenFunction::EmitOMPTargetExitDataDirective(
7382 const OMPTargetExitDataDirective &S) {
7383 // If we don't have target devices, don't bother emitting the data mapping
7384 // code.
7385 if (CGM.getLangOpts().OMPTargetTriples.empty())
7386 return;
7387
7388 // Check if we have any if clause associated with the directive.
7389 const Expr *IfCond = nullptr;
7390 if (const auto *C = S.getSingleClause<OMPIfClause>())
7391 IfCond = C->getCondition();
7392
7393 // Check if we have any device clause associated with the directive.
7394 const Expr *Device = nullptr;
7395 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7396 Device = C->getDevice();
7397
7398 OMPLexicalScope Scope(*this, S, OMPD_task);
7399 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
7400 }
7401
7402 static void emitTargetParallelRegion(CodeGenFunction &CGF,
7403 const OMPTargetParallelDirective &S,
7404 PrePostActionTy &Action) {
7405 // Get the captured statement associated with the 'parallel' region.
7406 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
7407 Action.Enter(CGF);
7408 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
7409 Action.Enter(CGF);
7410 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7411 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
7412 CGF.EmitOMPPrivateClause(S, PrivateScope);
7413 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7414 (void)PrivateScope.Privatize();
7415 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
7416 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
7417 // TODO: Add support for clauses.
7418 CGF.EmitStmt(CS->getCapturedStmt());
7419 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
7420 };
7421 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
7422 emitEmptyBoundParameters);
7423 emitPostUpdateForReductionClause(CGF, S,
7424 [](CodeGenFunction &) { return nullptr; });
7425 }
7426
7427 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
7428 CodeGenModule &CGM, StringRef ParentName,
7429 const OMPTargetParallelDirective &S) {
7430 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7431 emitTargetParallelRegion(CGF, S, Action);
7432 };
7433 llvm::Function *Fn;
7434 llvm::Constant *Addr;
7435 // Emit target region as a standalone region.
7436 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7437 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7438 assert(Fn && Addr && "Target device function emission failed.");
7439 }
7440
7441 void CodeGenFunction::EmitOMPTargetParallelDirective(
7442 const OMPTargetParallelDirective &S) {
7443 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7444 emitTargetParallelRegion(CGF, S, Action);
7445 };
7446 emitCommonOMPTargetDirective(*this, S, CodeGen);
7447 }
7448
7449 static void emitTargetParallelForRegion(CodeGenFunction &CGF,
7450 const OMPTargetParallelForDirective &S,
7451 PrePostActionTy &Action) {
7452 Action.Enter(CGF);
7453 // Emit the directive as a combined directive consisting of two implicit
7454 // directives: 'parallel' with a nested 'for' directive.
7455 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7456 Action.Enter(CGF);
7457 CodeGenFunction::OMPCancelStackRAII CancelRegion(
7458 CGF, OMPD_target_parallel_for, S.hasCancel());
7459 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
7460 emitDispatchForLoopBounds);
7461 };
7462 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
7463 emitEmptyBoundParameters);
7464 }
7465
7466 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
7467 CodeGenModule &CGM, StringRef ParentName,
7468 const OMPTargetParallelForDirective &S) {
7469 // Emit SPMD target parallel for region as a standalone region.
7470 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7471 emitTargetParallelForRegion(CGF, S, Action);
7472 };
7473 llvm::Function *Fn;
7474 llvm::Constant *Addr;
7475 // Emit target region as a standalone region.
7476 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7477 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7478 assert(Fn && Addr && "Target device function emission failed.");
7479 }
7480
7481 void CodeGenFunction::EmitOMPTargetParallelForDirective(
7482 const OMPTargetParallelForDirective &S) {
7483 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7484 emitTargetParallelForRegion(CGF, S, Action);
7485 };
7486 emitCommonOMPTargetDirective(*this, S, CodeGen);
7487 }
7488
7489 static void
7490 emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
7491 const OMPTargetParallelForSimdDirective &S,
7492 PrePostActionTy &Action) {
7493 Action.Enter(CGF);
7494 // Emit the directive as a combined directive consisting of two implicit
7495 // directives: 'parallel' with a nested 'for' directive.
7496 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7497 Action.Enter(CGF);
7498 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
7499 emitDispatchForLoopBounds);
7500 };
7501 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
7502 emitEmptyBoundParameters);
7503 }
7504
7505 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
7506 CodeGenModule &CGM, StringRef ParentName,
7507 const OMPTargetParallelForSimdDirective &S) {
7508 // Emit SPMD target parallel for region as a standalone region.
7509 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7510 emitTargetParallelForSimdRegion(CGF, S, Action);
7511 };
7512 llvm::Function *Fn;
7513 llvm::Constant *Addr;
7514 // Emit target region as a standalone region.
7515 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7516 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7517 assert(Fn && Addr && "Target device function emission failed.");
7518 }
7519
7520 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
7521 const OMPTargetParallelForSimdDirective &S) {
7522 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7523 emitTargetParallelForSimdRegion(CGF, S, Action);
7524 };
7525 emitCommonOMPTargetDirective(*this, S, CodeGen);
7526 }
7527
7528 /// Remap a loop helper variable onto the corresponding outlined-function parameter.
7529 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
7530 const ImplicitParamDecl *PVD,
7531 CodeGenFunction::OMPPrivateScope &Privates) {
7532 const auto *VDecl = cast<VarDecl>(Helper->getDecl());
7533 Privates.addPrivate(VDecl, CGF.GetAddrOfLocalVar(PVD));
7534 }
7535
7536 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
7537 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
7538 // Emit outlined function for task construct.
7539 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
7540 Address CapturedStruct = Address::invalid();
7541 {
7542 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
7543 CapturedStruct = GenerateCapturedStmtArgument(*CS);
7544 }
7545 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
7546 const Expr *IfCond = nullptr;
7547 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
7548 if (C->getNameModifier() == OMPD_unknown ||
7549 C->getNameModifier() == OMPD_taskloop) {
7550 IfCond = C->getCondition();
7551 break;
7552 }
7553 }
7554
7555 OMPTaskDataTy Data;
7556 // Check if taskloop must be emitted without taskgroup.
7557 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
7558 // TODO: Check if we should emit tied or untied task.
7559 Data.Tied = true;
7560 // Set scheduling for taskloop
7561 if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
7562 // grainsize clause
7563 Data.Schedule.setInt(/*IntVal=*/false);
7564 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
7565 } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
7566 // num_tasks clause
7567 Data.Schedule.setInt(/*IntVal=*/true);
7568 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
7569 }
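// Added example: '#pragma omp taskloop grainsize(G)' stores (G, false) and
// 'num_tasks(N)' stores (N, true) in Data.Schedule; the flag tells the
// taskloop runtime call (e.g. __kmpc_taskloop's 'sched' argument) how to
// interpret the value.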
7570
7571 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
7572 // if (PreCond) {
7573 // for (IV in 0..LastIteration) BODY;
7574 // <Final counter/linear vars updates>;
7575 // }
7576 //
7577
7578 // Emit: if (PreCond) - begin.
7579 // If the condition constant folds and can be elided, avoid emitting the
7580 // whole loop.
7581 bool CondConstant;
7582 llvm::BasicBlock *ContBlock = nullptr;
7583 OMPLoopScope PreInitScope(CGF, S);
7584 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
7585 if (!CondConstant)
7586 return;
7587 } else {
7588 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
7589 ContBlock = CGF.createBasicBlock("taskloop.if.end");
7590 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
7591 CGF.getProfileCount(&S));
7592 CGF.EmitBlock(ThenBlock);
7593 CGF.incrementProfileCounter(&S);
7594 }
7595
7596 (void)CGF.EmitOMPLinearClauseInit(S);
7597
7598 OMPPrivateScope LoopScope(CGF);
7599 // Emit helper vars inits.
7600 enum { LowerBound = 5, UpperBound, Stride, LastIter };
7601 auto *I = CS->getCapturedDecl()->param_begin();
7602 auto *LBP = std::next(I, LowerBound);
7603 auto *UBP = std::next(I, UpperBound);
7604 auto *STP = std::next(I, Stride);
7605 auto *LIP = std::next(I, LastIter);
7606 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
7607 LoopScope);
7608 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
7609 LoopScope);
7610 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
7611 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
7612 LoopScope);
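// Added note: the outlined taskloop function receives the loop bounds as
// trailing parameters (indices 5..8, per the enum above), so each helper
// variable is simply remapped onto its parameter instead of re-emitted.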
7613 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
7614 CGF.EmitOMPLinearClause(S, LoopScope);
7615 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
7616 (void)LoopScope.Privatize();
7617 // Emit the loop iteration variable.
7618 const Expr *IVExpr = S.getIterationVariable();
7619 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
7620 CGF.EmitVarDecl(*IVDecl);
7621 CGF.EmitIgnoredExpr(S.getInit());
7622
7623 // Emit the iterations count variable.
7624 // If it is not a variable, Sema decided to calculate the iteration count
7625 // on each use (e.g., it is foldable into a constant).
7626 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
7627 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
7628 // Emit calculation of the iterations count.
7629 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
7630 }
7631
7632 {
7633 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
7634 emitCommonSimdLoop(
7635 CGF, S,
7636 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7637 if (isOpenMPSimdDirective(S.getDirectiveKind()))
7638 CGF.EmitOMPSimdInit(S);
7639 },
7640 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
7641 CGF.EmitOMPInnerLoop(
7642 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
7643 [&S](CodeGenFunction &CGF) {
7644 emitOMPLoopBodyWithStopPoint(CGF, S,
7645 CodeGenFunction::JumpDest());
7646 },
7647 [](CodeGenFunction &) {});
7648 });
7649 }
7650 // Emit: if (PreCond) - end.
7651 if (ContBlock) {
7652 CGF.EmitBranch(ContBlock);
7653 CGF.EmitBlock(ContBlock, true);
7654 }
7655 // Emit final copy of the lastprivate variables if IsLastIter != 0.
7656 if (HasLastprivateClause) {
7657 CGF.EmitOMPLastprivateClauseFinal(
7658 S, isOpenMPSimdDirective(S.getDirectiveKind()),
7659 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
7660 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
7661 (*LIP)->getType(), S.getBeginLoc())));
7662 }
7663 LoopScope.restoreMap();
7664 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) {
7665 return CGF.Builder.CreateIsNotNull(
7666 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
7667 (*LIP)->getType(), S.getBeginLoc()));
7668 });
7669 };
7670 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
7671 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
7672 const OMPTaskDataTy &Data) {
7673 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
7674 &Data](CodeGenFunction &CGF, PrePostActionTy &) {
7675 OMPLoopScope PreInitScope(CGF, S);
7676 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
7677 OutlinedFn, SharedsTy,
7678 CapturedStruct, IfCond, Data);
7679 };
7680 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
7681 CodeGen);
7682 };
7683 if (Data.Nogroup) {
7684 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
7685 } else {
7686 CGM.getOpenMPRuntime().emitTaskgroupRegion(
7687 *this,
7688 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
7689 PrePostActionTy &Action) {
7690 Action.Enter(CGF);
7691 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
7692 Data);
7693 },
7694 S.getBeginLoc());
7695 }
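// Added example: '#pragma omp taskloop nogroup' takes the first branch and
// skips the implicit taskgroup, so the construct does not wait for the
// generated tasks (or their descendants) when it completes.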
7696 }
7697
7698 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
7699 auto LPCRegion =
7700 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7701 EmitOMPTaskLoopBasedDirective(S);
7702 }
7703
7704 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
7705 const OMPTaskLoopSimdDirective &S) {
7706 auto LPCRegion =
7707 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7708 OMPLexicalScope Scope(*this, S);
7709 EmitOMPTaskLoopBasedDirective(S);
7710 }
7711
7712 void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
7713 const OMPMasterTaskLoopDirective &S) {
7714 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7715 Action.Enter(CGF);
7716 EmitOMPTaskLoopBasedDirective(S);
7717 };
7718 auto LPCRegion =
7719 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7720 OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
7721 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
7722 }
7723
7724 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
7725 const OMPMasterTaskLoopSimdDirective &S) {
7726 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7727 Action.Enter(CGF);
7728 EmitOMPTaskLoopBasedDirective(S);
7729 };
7730 auto LPCRegion =
7731 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7732 OMPLexicalScope Scope(*this, S);
7733 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
7734 }
7735
7736 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
7737 const OMPParallelMasterTaskLoopDirective &S) {
7738 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7739 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
7740 PrePostActionTy &Action) {
7741 Action.Enter(CGF);
7742 CGF.EmitOMPTaskLoopBasedDirective(S);
7743 };
7744 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
7745 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
7746 S.getBeginLoc());
7747 };
7748 auto LPCRegion =
7749 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7750 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
7751 emitEmptyBoundParameters);
7752 }
7753
7754 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
7755 const OMPParallelMasterTaskLoopSimdDirective &S) {
7756 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7757 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
7758 PrePostActionTy &Action) {
7759 Action.Enter(CGF);
7760 CGF.EmitOMPTaskLoopBasedDirective(S);
7761 };
7762 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
7763 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
7764 S.getBeginLoc());
7765 };
7766 auto LPCRegion =
7767 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7768 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
7769 emitEmptyBoundParameters);
7770 }
7771
7772 // Generate the instructions for the '#pragma omp target update' directive.
7773 void CodeGenFunction::EmitOMPTargetUpdateDirective(
7774 const OMPTargetUpdateDirective &S) {
7775 // If we don't have target devices, don't bother emitting the data mapping
7776 // code.
7777 if (CGM.getLangOpts().OMPTargetTriples.empty())
7778 return;
7779
7780 // Check if we have any if clause associated with the directive.
7781 const Expr *IfCond = nullptr;
7782 if (const auto *C = S.getSingleClause<OMPIfClause>())
7783 IfCond = C->getCondition();
7784
7785 // Check if we have any device clause associated with the directive.
7786 const Expr *Device = nullptr;
7787 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7788 Device = C->getDevice();
7789
7790 OMPLexicalScope Scope(*this, S, OMPD_task);
7791 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
7792 }
7793
7794 void CodeGenFunction::EmitOMPGenericLoopDirective(
7795 const OMPGenericLoopDirective &S) {
7796 // Unimplemented, just inline the underlying statement for now.
7797 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7798 CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
7799 };
7800 OMPLexicalScope Scope(*this, S, OMPD_unknown);
7801 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_loop, CodeGen);
7802 }
7803
7804 void CodeGenFunction::EmitSimpleOMPExecutableDirective(
7805 const OMPExecutableDirective &D) {
7806 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
7807 EmitOMPScanDirective(*SD);
7808 return;
7809 }
7810 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
7811 return;
7812 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
7813 OMPPrivateScope GlobalsScope(CGF);
7814 if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
7815 // Capture global firstprivates to avoid a crash.
7816 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
7817 for (const Expr *Ref : C->varlists()) {
7818 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
7819 if (!DRE)
7820 continue;
7821 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
7822 if (!VD || VD->hasLocalStorage())
7823 continue;
7824 if (!CGF.LocalDeclMap.count(VD)) {
7825 LValue GlobLVal = CGF.EmitLValue(Ref);
7826 GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
7827 }
7828 }
7829 }
7830 }
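// Added example: for a namespace-scope 'int G;' named in 'firstprivate(G)'
// there is no local alloca, so its global address is registered in the
// private scope here and the later capture lowering finds it instead of
// failing on a missing local mapping.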
7831 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
7832 (void)GlobalsScope.Privatize();
7833 ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
7834 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
7835 } else {
7836 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
7837 for (const Expr *E : LD->counters()) {
7838 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
7839 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
7840 LValue GlobLVal = CGF.EmitLValue(E);
7841 GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
7842 }
7843 if (isa<OMPCapturedExprDecl>(VD)) {
7844 // Emit only those that were not explicitly referenced in clauses.
7845 if (!CGF.LocalDeclMap.count(VD))
7846 CGF.EmitVarDecl(*VD);
7847 }
7848 }
7849 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
7850 if (!C->getNumForLoops())
7851 continue;
7852 for (unsigned I = LD->getLoopsNumber(),
7853 E = C->getLoopNumIterations().size();
7854 I < E; ++I) {
7855 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
7856 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
7857 // Emit only those that were not explicitly referenced in clauses.
7858 if (!CGF.LocalDeclMap.count(VD))
7859 CGF.EmitVarDecl(*VD);
7860 }
7861 }
7862 }
7863 }
7864 (void)GlobalsScope.Privatize();
7865 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
7866 }
7867 };
7868 if (D.getDirectiveKind() == OMPD_atomic ||
7869 D.getDirectiveKind() == OMPD_critical ||
7870 D.getDirectiveKind() == OMPD_section ||
7871 D.getDirectiveKind() == OMPD_master ||
7872 D.getDirectiveKind() == OMPD_masked) {
7873 EmitStmt(D.getAssociatedStmt());
7874 } else {
7875 auto LPCRegion =
7876 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
7877 OMPSimdLexicalScope Scope(*this, D);
7878 CGM.getOpenMPRuntime().emitInlinedDirective(
7879 *this,
7880 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
7881 : D.getDirectiveKind(),
7882 CodeGen);
7883 }
7884 // Check for outer lastprivate conditional update.
7885 checkForLastprivateConditionalUpdate(*this, D);
7886 }
7887