1 //===---- CGOpenMPRuntimeGPU.cpp - Interface to OpenMP GPU Runtimes ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides a generalized class for OpenMP runtime code generation
10 // specialized by GPU targets NVPTX and AMDGCN.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGOpenMPRuntimeGPU.h"
15 #include "CGOpenMPRuntimeNVPTX.h"
16 #include "CodeGenFunction.h"
17 #include "clang/AST/Attr.h"
18 #include "clang/AST/DeclOpenMP.h"
19 #include "clang/AST/StmtOpenMP.h"
20 #include "clang/AST/StmtVisitor.h"
21 #include "clang/Basic/Cuda.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/Frontend/OpenMP/OMPGridValues.h"
24 #include "llvm/IR/IntrinsicsNVPTX.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 using namespace llvm::omp;
29 
30 namespace {
31 /// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
32 class NVPTXActionTy final : public PrePostActionTy {
33   llvm::FunctionCallee EnterCallee = nullptr;
34   ArrayRef<llvm::Value *> EnterArgs;
35   llvm::FunctionCallee ExitCallee = nullptr;
36   ArrayRef<llvm::Value *> ExitArgs;
37   bool Conditional = false;
38   llvm::BasicBlock *ContBlock = nullptr;
39 
40 public:
41   NVPTXActionTy(llvm::FunctionCallee EnterCallee,
42                 ArrayRef<llvm::Value *> EnterArgs,
43                 llvm::FunctionCallee ExitCallee,
44                 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
45       : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
46         ExitArgs(ExitArgs), Conditional(Conditional) {}
47   void Enter(CodeGenFunction &CGF) override {
48     llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
49     if (Conditional) {
50       llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
51       auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
52       ContBlock = CGF.createBasicBlock("omp_if.end");
53       // Generate the branch (If-stmt)
54       CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
55       CGF.EmitBlock(ThenBlock);
56     }
57   }
58   void Done(CodeGenFunction &CGF) {
59     // Emit the rest of blocks/branches
60     CGF.EmitBranch(ContBlock);
61     CGF.EmitBlock(ContBlock, true);
62   }
63   void Exit(CodeGenFunction &CGF) override {
64     CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
65   }
66 };
67 
68 /// A class to track the execution mode when codegening directives within
69 /// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
70 /// to the target region and used by containing directives such as 'parallel'
71 /// to emit optimized code.
72 class ExecutionRuntimeModesRAII {
73 private:
74   CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
75       CGOpenMPRuntimeGPU::EM_Unknown;
76   CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
77   bool SavedRuntimeMode = false;
78   bool *RuntimeMode = nullptr;
79 
80 public:
81   /// Constructor for Non-SPMD mode.
82   ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
83       : ExecMode(ExecMode) {
84     SavedExecMode = ExecMode;
85     ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
86   }
87   /// Constructor for SPMD mode.
88   ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
89                             bool &RuntimeMode, bool FullRuntimeMode)
90       : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
91     SavedExecMode = ExecMode;
92     SavedRuntimeMode = RuntimeMode;
93     ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
94     RuntimeMode = FullRuntimeMode;
95   }
96   ~ExecutionRuntimeModesRAII() {
97     ExecMode = SavedExecMode;
98     if (RuntimeMode)
99       *RuntimeMode = SavedRuntimeMode;
100   }
101 };
102 
103 /// GPU Configuration:  This information can be derived from cuda registers,
104 /// however, providing compile time constants helps generate more efficient
105 /// code.  For all practical purposes this is fine because the configuration
106 /// is the same for all known NVPTX architectures.
107 enum MachineConfiguration : unsigned {
108   /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
109   /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
110   /// and GV_Warp_Size_Log2_Mask.
111 
112   /// Global memory alignment for performance.
113   GlobalMemoryAlignment = 128,
114 
115   /// Maximal size of the shared memory buffer.
116   SharedMemorySize = 128,
117 };
118 
119 static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
120   RefExpr = RefExpr->IgnoreParens();
121   if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
122     const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
123     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
124       Base = TempASE->getBase()->IgnoreParenImpCasts();
125     RefExpr = Base;
126   } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
127     const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
128     while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
129       Base = TempOASE->getBase()->IgnoreParenImpCasts();
130     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
131       Base = TempASE->getBase()->IgnoreParenImpCasts();
132     RefExpr = Base;
133   }
134   RefExpr = RefExpr->IgnoreParenImpCasts();
135   if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
136     return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
137   const auto *ME = cast<MemberExpr>(RefExpr);
138   return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
139 }
140 
141 
142 static RecordDecl *buildRecordForGlobalizedVars(
143     ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
144     ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
145     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
146         &MappedDeclsFields, int BufSize) {
147   using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
148   if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
149     return nullptr;
150   SmallVector<VarsDataTy, 4> GlobalizedVars;
151   for (const ValueDecl *D : EscapedDecls)
152     GlobalizedVars.emplace_back(
153         CharUnits::fromQuantity(std::max(
154             C.getDeclAlign(D).getQuantity(),
155             static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
156         D);
157   for (const ValueDecl *D : EscapedDeclsForTeams)
158     GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
159   llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
160     return L.first > R.first;
161   });
162 
163   // Build struct _globalized_locals_ty {
164   //         /*  globalized vars  */[WarSize] align (max(decl_align,
165   //         GlobalMemoryAlignment))
166   //         /*  globalized vars  */ for EscapedDeclsForTeams
167   //       };
168   RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
169   GlobalizedRD->startDefinition();
170   llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
171       EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
172   for (const auto &Pair : GlobalizedVars) {
173     const ValueDecl *VD = Pair.second;
174     QualType Type = VD->getType();
175     if (Type->isLValueReferenceType())
176       Type = C.getPointerType(Type.getNonReferenceType());
177     else
178       Type = Type.getNonReferenceType();
179     SourceLocation Loc = VD->getLocation();
180     FieldDecl *Field;
181     if (SingleEscaped.count(VD)) {
182       Field = FieldDecl::Create(
183           C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
184           C.getTrivialTypeSourceInfo(Type, SourceLocation()),
185           /*BW=*/nullptr, /*Mutable=*/false,
186           /*InitStyle=*/ICIS_NoInit);
187       Field->setAccess(AS_public);
188       if (VD->hasAttrs()) {
189         for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
190              E(VD->getAttrs().end());
191              I != E; ++I)
192           Field->addAttr(*I);
193       }
194     } else {
195       llvm::APInt ArraySize(32, BufSize);
196       Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
197                                     0);
198       Field = FieldDecl::Create(
199           C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
200           C.getTrivialTypeSourceInfo(Type, SourceLocation()),
201           /*BW=*/nullptr, /*Mutable=*/false,
202           /*InitStyle=*/ICIS_NoInit);
203       Field->setAccess(AS_public);
204       llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
205                                      static_cast<CharUnits::QuantityType>(
206                                          GlobalMemoryAlignment)));
207       Field->addAttr(AlignedAttr::CreateImplicit(
208           C, /*IsAlignmentExpr=*/true,
209           IntegerLiteral::Create(C, Align,
210                                  C.getIntTypeForBitwidth(32, /*Signed=*/0),
211                                  SourceLocation()),
212           {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
213     }
214     GlobalizedRD->addDecl(Field);
215     MappedDeclsFields.try_emplace(VD, Field);
216   }
217   GlobalizedRD->completeDefinition();
218   return GlobalizedRD;
219 }
220 
221 /// Get the list of variables that can escape their declaration context.
222 class CheckVarsEscapingDeclContext final
223     : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
224   CodeGenFunction &CGF;
225   llvm::SetVector<const ValueDecl *> EscapedDecls;
226   llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
227   llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
228   RecordDecl *GlobalizedRD = nullptr;
229   llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
230   bool AllEscaped = false;
231   bool IsForCombinedParallelRegion = false;
232 
233   void markAsEscaped(const ValueDecl *VD) {
234     // Do not globalize declare target variables.
235     if (!isa<VarDecl>(VD) ||
236         OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
237       return;
238     VD = cast<ValueDecl>(VD->getCanonicalDecl());
239     // Use user-specified allocation.
240     if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
241       return;
242     // Variables captured by value must be globalized.
243     if (auto *CSI = CGF.CapturedStmtInfo) {
244       if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
245         // Check if need to capture the variable that was already captured by
246         // value in the outer region.
247         if (!IsForCombinedParallelRegion) {
248           if (!FD->hasAttrs())
249             return;
250           const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
251           if (!Attr)
252             return;
253           if (((Attr->getCaptureKind() != OMPC_map) &&
254                !isOpenMPPrivate(Attr->getCaptureKind())) ||
255               ((Attr->getCaptureKind() == OMPC_map) &&
256                !FD->getType()->isAnyPointerType()))
257             return;
258         }
259         if (!FD->getType()->isReferenceType()) {
260           assert(!VD->getType()->isVariablyModifiedType() &&
261                  "Parameter captured by value with variably modified type");
262           EscapedParameters.insert(VD);
263         } else if (!IsForCombinedParallelRegion) {
264           return;
265         }
266       }
267     }
268     if ((!CGF.CapturedStmtInfo ||
269          (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
270         VD->getType()->isReferenceType())
271       // Do not globalize variables with reference type.
272       return;
273     if (VD->getType()->isVariablyModifiedType())
274       EscapedVariableLengthDecls.insert(VD);
275     else
276       EscapedDecls.insert(VD);
277   }
278 
279   void VisitValueDecl(const ValueDecl *VD) {
280     if (VD->getType()->isLValueReferenceType())
281       markAsEscaped(VD);
282     if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
283       if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
284         const bool SavedAllEscaped = AllEscaped;
285         AllEscaped = VD->getType()->isLValueReferenceType();
286         Visit(VarD->getInit());
287         AllEscaped = SavedAllEscaped;
288       }
289     }
290   }
291   void VisitOpenMPCapturedStmt(const CapturedStmt *S,
292                                ArrayRef<OMPClause *> Clauses,
293                                bool IsCombinedParallelRegion) {
294     if (!S)
295       return;
296     for (const CapturedStmt::Capture &C : S->captures()) {
297       if (C.capturesVariable() && !C.capturesVariableByCopy()) {
298         const ValueDecl *VD = C.getCapturedVar();
299         bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
300         if (IsCombinedParallelRegion) {
301           // Check if the variable is privatized in the combined construct and
302           // those private copies must be shared in the inner parallel
303           // directive.
304           IsForCombinedParallelRegion = false;
305           for (const OMPClause *C : Clauses) {
306             if (!isOpenMPPrivate(C->getClauseKind()) ||
307                 C->getClauseKind() == OMPC_reduction ||
308                 C->getClauseKind() == OMPC_linear ||
309                 C->getClauseKind() == OMPC_private)
310               continue;
311             ArrayRef<const Expr *> Vars;
312             if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
313               Vars = PC->getVarRefs();
314             else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
315               Vars = PC->getVarRefs();
316             else
317               llvm_unreachable("Unexpected clause.");
318             for (const auto *E : Vars) {
319               const Decl *D =
320                   cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
321               if (D == VD->getCanonicalDecl()) {
322                 IsForCombinedParallelRegion = true;
323                 break;
324               }
325             }
326             if (IsForCombinedParallelRegion)
327               break;
328           }
329         }
330         markAsEscaped(VD);
331         if (isa<OMPCapturedExprDecl>(VD))
332           VisitValueDecl(VD);
333         IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
334       }
335     }
336   }
337 
338   void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
339     assert(!GlobalizedRD &&
340            "Record for globalized variables is built already.");
341     ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
342     unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
343     if (IsInTTDRegion)
344       EscapedDeclsForTeams = EscapedDecls.getArrayRef();
345     else
346       EscapedDeclsForParallel = EscapedDecls.getArrayRef();
347     GlobalizedRD = ::buildRecordForGlobalizedVars(
348         CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
349         MappedDeclsFields, WarpSize);
350   }
351 
352 public:
353   CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
354                                ArrayRef<const ValueDecl *> TeamsReductions)
355       : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
356   }
357   virtual ~CheckVarsEscapingDeclContext() = default;
358   void VisitDeclStmt(const DeclStmt *S) {
359     if (!S)
360       return;
361     for (const Decl *D : S->decls())
362       if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
363         VisitValueDecl(VD);
364   }
365   void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
366     if (!D)
367       return;
368     if (!D->hasAssociatedStmt())
369       return;
370     if (const auto *S =
371             dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
372       // Do not analyze directives that do not actually require capturing,
373       // like `omp for` or `omp simd` directives.
374       llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
375       getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
376       if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
377         VisitStmt(S->getCapturedStmt());
378         return;
379       }
380       VisitOpenMPCapturedStmt(
381           S, D->clauses(),
382           CaptureRegions.back() == OMPD_parallel &&
383               isOpenMPDistributeDirective(D->getDirectiveKind()));
384     }
385   }
386   void VisitCapturedStmt(const CapturedStmt *S) {
387     if (!S)
388       return;
389     for (const CapturedStmt::Capture &C : S->captures()) {
390       if (C.capturesVariable() && !C.capturesVariableByCopy()) {
391         const ValueDecl *VD = C.getCapturedVar();
392         markAsEscaped(VD);
393         if (isa<OMPCapturedExprDecl>(VD))
394           VisitValueDecl(VD);
395       }
396     }
397   }
398   void VisitLambdaExpr(const LambdaExpr *E) {
399     if (!E)
400       return;
401     for (const LambdaCapture &C : E->captures()) {
402       if (C.capturesVariable()) {
403         if (C.getCaptureKind() == LCK_ByRef) {
404           const ValueDecl *VD = C.getCapturedVar();
405           markAsEscaped(VD);
406           if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
407             VisitValueDecl(VD);
408         }
409       }
410     }
411   }
412   void VisitBlockExpr(const BlockExpr *E) {
413     if (!E)
414       return;
415     for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
416       if (C.isByRef()) {
417         const VarDecl *VD = C.getVariable();
418         markAsEscaped(VD);
419         if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
420           VisitValueDecl(VD);
421       }
422     }
423   }
424   void VisitCallExpr(const CallExpr *E) {
425     if (!E)
426       return;
427     for (const Expr *Arg : E->arguments()) {
428       if (!Arg)
429         continue;
430       if (Arg->isLValue()) {
431         const bool SavedAllEscaped = AllEscaped;
432         AllEscaped = true;
433         Visit(Arg);
434         AllEscaped = SavedAllEscaped;
435       } else {
436         Visit(Arg);
437       }
438     }
439     Visit(E->getCallee());
440   }
441   void VisitDeclRefExpr(const DeclRefExpr *E) {
442     if (!E)
443       return;
444     const ValueDecl *VD = E->getDecl();
445     if (AllEscaped)
446       markAsEscaped(VD);
447     if (isa<OMPCapturedExprDecl>(VD))
448       VisitValueDecl(VD);
449     else if (const auto *VarD = dyn_cast<VarDecl>(VD))
450       if (VarD->isInitCapture())
451         VisitValueDecl(VD);
452   }
453   void VisitUnaryOperator(const UnaryOperator *E) {
454     if (!E)
455       return;
456     if (E->getOpcode() == UO_AddrOf) {
457       const bool SavedAllEscaped = AllEscaped;
458       AllEscaped = true;
459       Visit(E->getSubExpr());
460       AllEscaped = SavedAllEscaped;
461     } else {
462       Visit(E->getSubExpr());
463     }
464   }
465   void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
466     if (!E)
467       return;
468     if (E->getCastKind() == CK_ArrayToPointerDecay) {
469       const bool SavedAllEscaped = AllEscaped;
470       AllEscaped = true;
471       Visit(E->getSubExpr());
472       AllEscaped = SavedAllEscaped;
473     } else {
474       Visit(E->getSubExpr());
475     }
476   }
477   void VisitExpr(const Expr *E) {
478     if (!E)
479       return;
480     bool SavedAllEscaped = AllEscaped;
481     if (!E->isLValue())
482       AllEscaped = false;
483     for (const Stmt *Child : E->children())
484       if (Child)
485         Visit(Child);
486     AllEscaped = SavedAllEscaped;
487   }
488   void VisitStmt(const Stmt *S) {
489     if (!S)
490       return;
491     for (const Stmt *Child : S->children())
492       if (Child)
493         Visit(Child);
494   }
495 
496   /// Returns the record that handles all the escaped local variables and used
497   /// instead of their original storage.
498   const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
499     if (!GlobalizedRD)
500       buildRecordForGlobalizedVars(IsInTTDRegion);
501     return GlobalizedRD;
502   }
503 
504   /// Returns the field in the globalized record for the escaped variable.
505   const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
506     assert(GlobalizedRD &&
507            "Record for globalized variables must be generated already.");
508     auto I = MappedDeclsFields.find(VD);
509     if (I == MappedDeclsFields.end())
510       return nullptr;
511     return I->getSecond();
512   }
513 
514   /// Returns the list of the escaped local variables/parameters.
515   ArrayRef<const ValueDecl *> getEscapedDecls() const {
516     return EscapedDecls.getArrayRef();
517   }
518 
519   /// Checks if the escaped local variable is actually a parameter passed by
520   /// value.
521   const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
522     return EscapedParameters;
523   }
524 
525   /// Returns the list of the escaped variables with the variably modified
526   /// types.
527   ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
528     return EscapedVariableLengthDecls.getArrayRef();
529   }
530 };
531 } // anonymous namespace
532 
533 /// Get the id of the warp in the block.
534 /// We assume that the warp size is 32, which is always the case
535 /// on the NVPTX device, to generate more efficient code.
536 static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
537   CGBuilderTy &Bld = CGF.Builder;
538   unsigned LaneIDBits =
539       CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
540   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
541   return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
542 }
543 
544 /// Get the id of the current lane in the Warp.
545 /// We assume that the warp size is 32, which is always the case
546 /// on the NVPTX device, to generate more efficient code.
547 static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
548   CGBuilderTy &Bld = CGF.Builder;
549   unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
550       llvm::omp::GV_Warp_Size_Log2_Mask);
551   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
552   return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
553                        "nvptx_lane_id");
554 }
555 
556 CGOpenMPRuntimeGPU::ExecutionMode
557 CGOpenMPRuntimeGPU::getExecutionMode() const {
558   return CurrentExecutionMode;
559 }
560 
561 static CGOpenMPRuntimeGPU::DataSharingMode
562 getDataSharingMode(CodeGenModule &CGM) {
563   return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
564                                           : CGOpenMPRuntimeGPU::Generic;
565 }
566 
567 /// Check for inner (nested) SPMD construct, if any
568 static bool hasNestedSPMDDirective(ASTContext &Ctx,
569                                    const OMPExecutableDirective &D) {
570   const auto *CS = D.getInnermostCapturedStmt();
571   const auto *Body =
572       CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
573   const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
574 
575   if (const auto *NestedDir =
576           dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
577     OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
578     switch (D.getDirectiveKind()) {
579     case OMPD_target:
580       if (isOpenMPParallelDirective(DKind))
581         return true;
582       if (DKind == OMPD_teams) {
583         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
584             /*IgnoreCaptured=*/true);
585         if (!Body)
586           return false;
587         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
588         if (const auto *NND =
589                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
590           DKind = NND->getDirectiveKind();
591           if (isOpenMPParallelDirective(DKind))
592             return true;
593         }
594       }
595       return false;
596     case OMPD_target_teams:
597       return isOpenMPParallelDirective(DKind);
598     case OMPD_target_simd:
599     case OMPD_target_parallel:
600     case OMPD_target_parallel_for:
601     case OMPD_target_parallel_for_simd:
602     case OMPD_target_teams_distribute:
603     case OMPD_target_teams_distribute_simd:
604     case OMPD_target_teams_distribute_parallel_for:
605     case OMPD_target_teams_distribute_parallel_for_simd:
606     case OMPD_parallel:
607     case OMPD_for:
608     case OMPD_parallel_for:
609     case OMPD_parallel_master:
610     case OMPD_parallel_sections:
611     case OMPD_for_simd:
612     case OMPD_parallel_for_simd:
613     case OMPD_cancel:
614     case OMPD_cancellation_point:
615     case OMPD_ordered:
616     case OMPD_threadprivate:
617     case OMPD_allocate:
618     case OMPD_task:
619     case OMPD_simd:
620     case OMPD_sections:
621     case OMPD_section:
622     case OMPD_single:
623     case OMPD_master:
624     case OMPD_critical:
625     case OMPD_taskyield:
626     case OMPD_barrier:
627     case OMPD_taskwait:
628     case OMPD_taskgroup:
629     case OMPD_atomic:
630     case OMPD_flush:
631     case OMPD_depobj:
632     case OMPD_scan:
633     case OMPD_teams:
634     case OMPD_target_data:
635     case OMPD_target_exit_data:
636     case OMPD_target_enter_data:
637     case OMPD_distribute:
638     case OMPD_distribute_simd:
639     case OMPD_distribute_parallel_for:
640     case OMPD_distribute_parallel_for_simd:
641     case OMPD_teams_distribute:
642     case OMPD_teams_distribute_simd:
643     case OMPD_teams_distribute_parallel_for:
644     case OMPD_teams_distribute_parallel_for_simd:
645     case OMPD_target_update:
646     case OMPD_declare_simd:
647     case OMPD_declare_variant:
648     case OMPD_begin_declare_variant:
649     case OMPD_end_declare_variant:
650     case OMPD_declare_target:
651     case OMPD_end_declare_target:
652     case OMPD_declare_reduction:
653     case OMPD_declare_mapper:
654     case OMPD_taskloop:
655     case OMPD_taskloop_simd:
656     case OMPD_master_taskloop:
657     case OMPD_master_taskloop_simd:
658     case OMPD_parallel_master_taskloop:
659     case OMPD_parallel_master_taskloop_simd:
660     case OMPD_requires:
661     case OMPD_unknown:
662     default:
663       llvm_unreachable("Unexpected directive.");
664     }
665   }
666 
667   return false;
668 }
669 
670 static bool supportsSPMDExecutionMode(ASTContext &Ctx,
671                                       const OMPExecutableDirective &D) {
672   OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
673   switch (DirectiveKind) {
674   case OMPD_target:
675   case OMPD_target_teams:
676     return hasNestedSPMDDirective(Ctx, D);
677   case OMPD_target_parallel:
678   case OMPD_target_parallel_for:
679   case OMPD_target_parallel_for_simd:
680   case OMPD_target_teams_distribute_parallel_for:
681   case OMPD_target_teams_distribute_parallel_for_simd:
682   case OMPD_target_simd:
683   case OMPD_target_teams_distribute_simd:
684     return true;
685   case OMPD_target_teams_distribute:
686     return false;
687   case OMPD_parallel:
688   case OMPD_for:
689   case OMPD_parallel_for:
690   case OMPD_parallel_master:
691   case OMPD_parallel_sections:
692   case OMPD_for_simd:
693   case OMPD_parallel_for_simd:
694   case OMPD_cancel:
695   case OMPD_cancellation_point:
696   case OMPD_ordered:
697   case OMPD_threadprivate:
698   case OMPD_allocate:
699   case OMPD_task:
700   case OMPD_simd:
701   case OMPD_sections:
702   case OMPD_section:
703   case OMPD_single:
704   case OMPD_master:
705   case OMPD_critical:
706   case OMPD_taskyield:
707   case OMPD_barrier:
708   case OMPD_taskwait:
709   case OMPD_taskgroup:
710   case OMPD_atomic:
711   case OMPD_flush:
712   case OMPD_depobj:
713   case OMPD_scan:
714   case OMPD_teams:
715   case OMPD_target_data:
716   case OMPD_target_exit_data:
717   case OMPD_target_enter_data:
718   case OMPD_distribute:
719   case OMPD_distribute_simd:
720   case OMPD_distribute_parallel_for:
721   case OMPD_distribute_parallel_for_simd:
722   case OMPD_teams_distribute:
723   case OMPD_teams_distribute_simd:
724   case OMPD_teams_distribute_parallel_for:
725   case OMPD_teams_distribute_parallel_for_simd:
726   case OMPD_target_update:
727   case OMPD_declare_simd:
728   case OMPD_declare_variant:
729   case OMPD_begin_declare_variant:
730   case OMPD_end_declare_variant:
731   case OMPD_declare_target:
732   case OMPD_end_declare_target:
733   case OMPD_declare_reduction:
734   case OMPD_declare_mapper:
735   case OMPD_taskloop:
736   case OMPD_taskloop_simd:
737   case OMPD_master_taskloop:
738   case OMPD_master_taskloop_simd:
739   case OMPD_parallel_master_taskloop:
740   case OMPD_parallel_master_taskloop_simd:
741   case OMPD_requires:
742   case OMPD_unknown:
743   default:
744     break;
745   }
746   llvm_unreachable(
747       "Unknown programming model for OpenMP directive on NVPTX target.");
748 }
749 
750 /// Check if the directive is loops based and has schedule clause at all or has
751 /// static scheduling.
752 static bool hasStaticScheduling(const OMPExecutableDirective &D) {
753   assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
754          isOpenMPLoopDirective(D.getDirectiveKind()) &&
755          "Expected loop-based directive.");
756   return !D.hasClausesOfKind<OMPOrderedClause>() &&
757          (!D.hasClausesOfKind<OMPScheduleClause>() ||
758           llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
759                        [](const OMPScheduleClause *C) {
760                          return C->getScheduleKind() == OMPC_SCHEDULE_static;
761                        }));
762 }
763 
764 /// Check for inner (nested) lightweight runtime construct, if any
765 static bool hasNestedLightweightDirective(ASTContext &Ctx,
766                                           const OMPExecutableDirective &D) {
767   assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
768   const auto *CS = D.getInnermostCapturedStmt();
769   const auto *Body =
770       CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
771   const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
772 
773   if (const auto *NestedDir =
774           dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
775     OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
776     switch (D.getDirectiveKind()) {
777     case OMPD_target:
778       if (isOpenMPParallelDirective(DKind) &&
779           isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
780           hasStaticScheduling(*NestedDir))
781         return true;
782       if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
783         return true;
784       if (DKind == OMPD_parallel) {
785         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
786             /*IgnoreCaptured=*/true);
787         if (!Body)
788           return false;
789         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
790         if (const auto *NND =
791                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
792           DKind = NND->getDirectiveKind();
793           if (isOpenMPWorksharingDirective(DKind) &&
794               isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
795             return true;
796         }
797       } else if (DKind == OMPD_teams) {
798         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
799             /*IgnoreCaptured=*/true);
800         if (!Body)
801           return false;
802         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
803         if (const auto *NND =
804                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
805           DKind = NND->getDirectiveKind();
806           if (isOpenMPParallelDirective(DKind) &&
807               isOpenMPWorksharingDirective(DKind) &&
808               isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
809             return true;
810           if (DKind == OMPD_parallel) {
811             Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
812                 /*IgnoreCaptured=*/true);
813             if (!Body)
814               return false;
815             ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
816             if (const auto *NND =
817                     dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
818               DKind = NND->getDirectiveKind();
819               if (isOpenMPWorksharingDirective(DKind) &&
820                   isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
821                 return true;
822             }
823           }
824         }
825       }
826       return false;
827     case OMPD_target_teams:
828       if (isOpenMPParallelDirective(DKind) &&
829           isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
830           hasStaticScheduling(*NestedDir))
831         return true;
832       if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
833         return true;
834       if (DKind == OMPD_parallel) {
835         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
836             /*IgnoreCaptured=*/true);
837         if (!Body)
838           return false;
839         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
840         if (const auto *NND =
841                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
842           DKind = NND->getDirectiveKind();
843           if (isOpenMPWorksharingDirective(DKind) &&
844               isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
845             return true;
846         }
847       }
848       return false;
849     case OMPD_target_parallel:
850       if (DKind == OMPD_simd)
851         return true;
852       return isOpenMPWorksharingDirective(DKind) &&
853              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
854     case OMPD_target_teams_distribute:
855     case OMPD_target_simd:
856     case OMPD_target_parallel_for:
857     case OMPD_target_parallel_for_simd:
858     case OMPD_target_teams_distribute_simd:
859     case OMPD_target_teams_distribute_parallel_for:
860     case OMPD_target_teams_distribute_parallel_for_simd:
861     case OMPD_parallel:
862     case OMPD_for:
863     case OMPD_parallel_for:
864     case OMPD_parallel_master:
865     case OMPD_parallel_sections:
866     case OMPD_for_simd:
867     case OMPD_parallel_for_simd:
868     case OMPD_cancel:
869     case OMPD_cancellation_point:
870     case OMPD_ordered:
871     case OMPD_threadprivate:
872     case OMPD_allocate:
873     case OMPD_task:
874     case OMPD_simd:
875     case OMPD_sections:
876     case OMPD_section:
877     case OMPD_single:
878     case OMPD_master:
879     case OMPD_critical:
880     case OMPD_taskyield:
881     case OMPD_barrier:
882     case OMPD_taskwait:
883     case OMPD_taskgroup:
884     case OMPD_atomic:
885     case OMPD_flush:
886     case OMPD_depobj:
887     case OMPD_scan:
888     case OMPD_teams:
889     case OMPD_target_data:
890     case OMPD_target_exit_data:
891     case OMPD_target_enter_data:
892     case OMPD_distribute:
893     case OMPD_distribute_simd:
894     case OMPD_distribute_parallel_for:
895     case OMPD_distribute_parallel_for_simd:
896     case OMPD_teams_distribute:
897     case OMPD_teams_distribute_simd:
898     case OMPD_teams_distribute_parallel_for:
899     case OMPD_teams_distribute_parallel_for_simd:
900     case OMPD_target_update:
901     case OMPD_declare_simd:
902     case OMPD_declare_variant:
903     case OMPD_begin_declare_variant:
904     case OMPD_end_declare_variant:
905     case OMPD_declare_target:
906     case OMPD_end_declare_target:
907     case OMPD_declare_reduction:
908     case OMPD_declare_mapper:
909     case OMPD_taskloop:
910     case OMPD_taskloop_simd:
911     case OMPD_master_taskloop:
912     case OMPD_master_taskloop_simd:
913     case OMPD_parallel_master_taskloop:
914     case OMPD_parallel_master_taskloop_simd:
915     case OMPD_requires:
916     case OMPD_unknown:
917     default:
918       llvm_unreachable("Unexpected directive.");
919     }
920   }
921 
922   return false;
923 }
924 
925 /// Checks if the construct supports lightweight runtime. It must be SPMD
926 /// construct + inner loop-based construct with static scheduling.
927 static bool supportsLightweightRuntime(ASTContext &Ctx,
928                                        const OMPExecutableDirective &D) {
929   if (!supportsSPMDExecutionMode(Ctx, D))
930     return false;
931   OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
932   switch (DirectiveKind) {
933   case OMPD_target:
934   case OMPD_target_teams:
935   case OMPD_target_parallel:
936     return hasNestedLightweightDirective(Ctx, D);
937   case OMPD_target_parallel_for:
938   case OMPD_target_parallel_for_simd:
939   case OMPD_target_teams_distribute_parallel_for:
940   case OMPD_target_teams_distribute_parallel_for_simd:
941     // (Last|First)-privates must be shared in parallel region.
942     return hasStaticScheduling(D);
943   case OMPD_target_simd:
944   case OMPD_target_teams_distribute_simd:
945     return true;
946   case OMPD_target_teams_distribute:
947     return false;
948   case OMPD_parallel:
949   case OMPD_for:
950   case OMPD_parallel_for:
951   case OMPD_parallel_master:
952   case OMPD_parallel_sections:
953   case OMPD_for_simd:
954   case OMPD_parallel_for_simd:
955   case OMPD_cancel:
956   case OMPD_cancellation_point:
957   case OMPD_ordered:
958   case OMPD_threadprivate:
959   case OMPD_allocate:
960   case OMPD_task:
961   case OMPD_simd:
962   case OMPD_sections:
963   case OMPD_section:
964   case OMPD_single:
965   case OMPD_master:
966   case OMPD_critical:
967   case OMPD_taskyield:
968   case OMPD_barrier:
969   case OMPD_taskwait:
970   case OMPD_taskgroup:
971   case OMPD_atomic:
972   case OMPD_flush:
973   case OMPD_depobj:
974   case OMPD_scan:
975   case OMPD_teams:
976   case OMPD_target_data:
977   case OMPD_target_exit_data:
978   case OMPD_target_enter_data:
979   case OMPD_distribute:
980   case OMPD_distribute_simd:
981   case OMPD_distribute_parallel_for:
982   case OMPD_distribute_parallel_for_simd:
983   case OMPD_teams_distribute:
984   case OMPD_teams_distribute_simd:
985   case OMPD_teams_distribute_parallel_for:
986   case OMPD_teams_distribute_parallel_for_simd:
987   case OMPD_target_update:
988   case OMPD_declare_simd:
989   case OMPD_declare_variant:
990   case OMPD_begin_declare_variant:
991   case OMPD_end_declare_variant:
992   case OMPD_declare_target:
993   case OMPD_end_declare_target:
994   case OMPD_declare_reduction:
995   case OMPD_declare_mapper:
996   case OMPD_taskloop:
997   case OMPD_taskloop_simd:
998   case OMPD_master_taskloop:
999   case OMPD_master_taskloop_simd:
1000   case OMPD_parallel_master_taskloop:
1001   case OMPD_parallel_master_taskloop_simd:
1002   case OMPD_requires:
1003   case OMPD_unknown:
1004   default:
1005     break;
1006   }
1007   llvm_unreachable(
1008       "Unknown programming model for OpenMP directive on NVPTX target.");
1009 }
1010 
1011 void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
1012                                              StringRef ParentName,
1013                                              llvm::Function *&OutlinedFn,
1014                                              llvm::Constant *&OutlinedFnID,
1015                                              bool IsOffloadEntry,
1016                                              const RegionCodeGenTy &CodeGen) {
1017   ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
1018   EntryFunctionState EST;
1019   WrapperFunctionsMap.clear();
1020 
1021   // Emit target region as a standalone region.
1022   class NVPTXPrePostActionTy : public PrePostActionTy {
1023     CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1024 
1025   public:
1026     NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
1027         : EST(EST) {}
1028     void Enter(CodeGenFunction &CGF) override {
1029       auto &RT =
1030           static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1031       RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
1032       // Skip target region initialization.
1033       RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1034     }
1035     void Exit(CodeGenFunction &CGF) override {
1036       auto &RT =
1037           static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1038       RT.clearLocThreadIdInsertPt(CGF);
1039       RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
1040     }
1041   } Action(EST);
1042   CodeGen.setAction(Action);
1043   IsInTTDRegion = true;
1044   emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1045                                    IsOffloadEntry, CodeGen);
1046   IsInTTDRegion = false;
1047 }
1048 
1049 void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
1050                                         EntryFunctionState &EST, bool IsSPMD) {
1051   CGBuilderTy &Bld = CGF.Builder;
1052   Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
1053   IsInTargetMasterThreadRegion = IsSPMD;
1054   if (!IsSPMD)
1055     emitGenericVarsProlog(CGF, EST.Loc);
1056 }
1057 
1058 void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
1059                                           EntryFunctionState &EST,
1060                                           bool IsSPMD) {
1061   if (!IsSPMD)
1062     emitGenericVarsEpilog(CGF);
1063 
1064   CGBuilderTy &Bld = CGF.Builder;
1065   OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
1066 }
1067 
1068 void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
1069                                           StringRef ParentName,
1070                                           llvm::Function *&OutlinedFn,
1071                                           llvm::Constant *&OutlinedFnID,
1072                                           bool IsOffloadEntry,
1073                                           const RegionCodeGenTy &CodeGen) {
1074   ExecutionRuntimeModesRAII ModeRAII(
1075       CurrentExecutionMode, RequiresFullRuntime,
1076       CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
1077           !supportsLightweightRuntime(CGM.getContext(), D));
1078   EntryFunctionState EST;
1079 
1080   // Emit target region as a standalone region.
1081   class NVPTXPrePostActionTy : public PrePostActionTy {
1082     CGOpenMPRuntimeGPU &RT;
1083     CGOpenMPRuntimeGPU::EntryFunctionState &EST;
1084 
1085   public:
1086     NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
1087                          CGOpenMPRuntimeGPU::EntryFunctionState &EST)
1088         : RT(RT), EST(EST) {}
1089     void Enter(CodeGenFunction &CGF) override {
1090       RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
1091       // Skip target region initialization.
1092       RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1093     }
1094     void Exit(CodeGenFunction &CGF) override {
1095       RT.clearLocThreadIdInsertPt(CGF);
1096       RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
1097     }
1098   } Action(*this, EST);
1099   CodeGen.setAction(Action);
1100   IsInTTDRegion = true;
1101   emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1102                                    IsOffloadEntry, CodeGen);
1103   IsInTTDRegion = false;
1104 }
1105 
1106 // Create a unique global variable to indicate the execution mode of this target
1107 // region. The execution mode is either 'generic', or 'spmd' depending on the
1108 // target directive. This variable is picked up by the offload library to setup
1109 // the device appropriately before kernel launch. If the execution mode is
1110 // 'generic', the runtime reserves one warp for the master, otherwise, all
1111 // warps participate in parallel work.
1112 static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
1113                                      bool Mode) {
1114   auto *GVMode =
1115       new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1116                                llvm::GlobalValue::WeakAnyLinkage,
1117                                llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
1118                                Twine(Name, "_exec_mode"));
1119   CGM.addCompilerUsedGlobal(GVMode);
1120 }
1121 
1122 void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
1123                                               llvm::Constant *Addr,
1124                                               uint64_t Size, int32_t,
1125                                               llvm::GlobalValue::LinkageTypes) {
1126   // TODO: Add support for global variables on the device after declare target
1127   // support.
1128   if (!isa<llvm::Function>(Addr))
1129     return;
1130   llvm::Module &M = CGM.getModule();
1131   llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1132 
1133   // Get "nvvm.annotations" metadata node
1134   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1135 
1136   llvm::Metadata *MDVals[] = {
1137       llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1138       llvm::ConstantAsMetadata::get(
1139           llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1140   // Append metadata to nvvm.annotations
1141   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1142 }
1143 
1144 void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
1145     const OMPExecutableDirective &D, StringRef ParentName,
1146     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1147     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1148   if (!IsOffloadEntry) // Nothing to do.
1149     return;
1150 
1151   assert(!ParentName.empty() && "Invalid target region parent name!");
1152 
1153   bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1154   if (Mode)
1155     emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1156                    CodeGen);
1157   else
1158     emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1159                       CodeGen);
1160 
1161   setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1162 }
1163 
1164 namespace {
1165 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1166 /// Enum for accesseing the reserved_2 field of the ident_t struct.
1167 enum ModeFlagsTy : unsigned {
1168   /// Bit set to 1 when in SPMD mode.
1169   KMP_IDENT_SPMD_MODE = 0x01,
1170   /// Bit set to 1 when a simplified runtime is used.
1171   KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1172   LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1173 };
1174 
1175 /// Special mode Undefined. Is the combination of Non-SPMD mode + SimpleRuntime.
1176 static const ModeFlagsTy UndefinedMode =
1177     (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1178 } // anonymous namespace
1179 
1180 unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
1181   switch (getExecutionMode()) {
1182   case EM_SPMD:
1183     if (requiresFullRuntime())
1184       return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1185     return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1186   case EM_NonSPMD:
1187     assert(requiresFullRuntime() && "Expected full runtime.");
1188     return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1189   case EM_Unknown:
1190     return UndefinedMode;
1191   }
1192   llvm_unreachable("Unknown flags are requested.");
1193 }
1194 
1195 CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
1196     : CGOpenMPRuntime(CGM, "_", "$") {
1197   if (!CGM.getLangOpts().OpenMPIsDevice)
1198     llvm_unreachable("OpenMP NVPTX can only handle device code.");
1199 }
1200 
1201 void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
1202                                               ProcBindKind ProcBind,
1203                                               SourceLocation Loc) {
1204   // Do nothing in case of SPMD mode and L0 parallel.
1205   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1206     return;
1207 
1208   CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1209 }
1210 
1211 void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
1212                                                 llvm::Value *NumThreads,
1213                                                 SourceLocation Loc) {
1214   // Do nothing in case of SPMD mode and L0 parallel.
1215   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
1216     return;
1217 
1218   CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1219 }
1220 
1221 void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
1222                                               const Expr *NumTeams,
1223                                               const Expr *ThreadLimit,
1224                                               SourceLocation Loc) {}
1225 
1226 llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
1227     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1228     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1229   // Emit target region as a standalone region.
1230   class NVPTXPrePostActionTy : public PrePostActionTy {
1231     bool &IsInParallelRegion;
1232     bool PrevIsInParallelRegion;
1233 
1234   public:
1235     NVPTXPrePostActionTy(bool &IsInParallelRegion)
1236         : IsInParallelRegion(IsInParallelRegion) {}
1237     void Enter(CodeGenFunction &CGF) override {
1238       PrevIsInParallelRegion = IsInParallelRegion;
1239       IsInParallelRegion = true;
1240     }
1241     void Exit(CodeGenFunction &CGF) override {
1242       IsInParallelRegion = PrevIsInParallelRegion;
1243     }
1244   } Action(IsInParallelRegion);
1245   CodeGen.setAction(Action);
1246   bool PrevIsInTTDRegion = IsInTTDRegion;
1247   IsInTTDRegion = false;
1248   bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1249   IsInTargetMasterThreadRegion = false;
1250   auto *OutlinedFun =
1251       cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1252           D, ThreadIDVar, InnermostKind, CodeGen));
1253   IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1254   IsInTTDRegion = PrevIsInTTDRegion;
1255   if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
1256       !IsInParallelRegion) {
1257     llvm::Function *WrapperFun =
1258         createParallelDataSharingWrapper(OutlinedFun, D);
1259     WrapperFunctionsMap[OutlinedFun] = WrapperFun;
1260   }
1261 
1262   return OutlinedFun;
1263 }
1264 
1265 /// Get list of lastprivate variables from the teams distribute ... or
1266 /// teams {distribute ...} directives.
1267 static void
1268 getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1269                              llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1270   assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1271          "expected teams directive.");
1272   const OMPExecutableDirective *Dir = &D;
1273   if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
1274     if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
1275             Ctx,
1276             D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
1277                 /*IgnoreCaptured=*/true))) {
1278       Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
1279       if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
1280         Dir = nullptr;
1281     }
1282   }
1283   if (!Dir)
1284     return;
1285   for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
1286     for (const Expr *E : C->getVarRefs())
1287       Vars.push_back(getPrivateItem(E));
1288   }
1289 }
1290 
1291 /// Get list of reduction variables from the teams ... directives.
1292 static void
1293 getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1294                       llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
1295   assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
1296          "expected teams directive.");
1297   for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1298     for (const Expr *E : C->privates())
1299       Vars.push_back(getPrivateItem(E));
1300   }
1301 }
1302 
1303 llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
1304     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1305     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1306   SourceLocation Loc = D.getBeginLoc();
1307 
1308   const RecordDecl *GlobalizedRD = nullptr;
1309   llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
1310   llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
1311   unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
1312   // Globalize team reductions variable unconditionally in all modes.
1313   if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1314     getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
1315   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
1316     getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
1317     if (!LastPrivatesReductions.empty()) {
1318       GlobalizedRD = ::buildRecordForGlobalizedVars(
1319           CGM.getContext(), llvm::None, LastPrivatesReductions,
1320           MappedDeclsFields, WarpSize);
1321     }
1322   } else if (!LastPrivatesReductions.empty()) {
1323     assert(!TeamAndReductions.first &&
1324            "Previous team declaration is not expected.");
1325     TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
1326     std::swap(TeamAndReductions.second, LastPrivatesReductions);
1327   }
1328 
1329   // Emit target region as a standalone region.
1330   class NVPTXPrePostActionTy : public PrePostActionTy {
1331     SourceLocation &Loc;
1332     const RecordDecl *GlobalizedRD;
1333     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1334         &MappedDeclsFields;
1335 
1336   public:
1337     NVPTXPrePostActionTy(
1338         SourceLocation &Loc, const RecordDecl *GlobalizedRD,
1339         llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
1340             &MappedDeclsFields)
1341         : Loc(Loc), GlobalizedRD(GlobalizedRD),
1342           MappedDeclsFields(MappedDeclsFields) {}
1343     void Enter(CodeGenFunction &CGF) override {
1344       auto &Rt =
1345           static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1346       if (GlobalizedRD) {
1347         auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
1348         I->getSecond().MappedParams =
1349             std::make_unique<CodeGenFunction::OMPMapVars>();
1350         DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
1351         for (const auto &Pair : MappedDeclsFields) {
1352           assert(Pair.getFirst()->isCanonicalDecl() &&
1353                  "Expected canonical declaration");
1354           Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
1355         }
1356       }
1357       Rt.emitGenericVarsProlog(CGF, Loc);
1358     }
1359     void Exit(CodeGenFunction &CGF) override {
1360       static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
1361           .emitGenericVarsEpilog(CGF);
1362     }
1363   } Action(Loc, GlobalizedRD, MappedDeclsFields);
1364   CodeGen.setAction(Action);
1365   llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
1366       D, ThreadIDVar, InnermostKind, CodeGen);
1367 
1368   return OutlinedFun;
1369 }
1370 
1371 void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
1372                                                  SourceLocation Loc,
1373                                                  bool WithSPMDCheck) {
1374   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1375       getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1376     return;
1377 
1378   CGBuilderTy &Bld = CGF.Builder;
1379 
1380   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1381   if (I == FunctionGlobalizedDecls.end())
1382     return;
1383 
1384   for (auto &Rec : I->getSecond().LocalVarData) {
1385     const auto *VD = cast<VarDecl>(Rec.first);
1386     bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
1387     QualType VarTy = VD->getType();
1388 
1389     // Get the local allocation of a firstprivate variable before sharing
1390     llvm::Value *ParValue;
1391     if (EscapedParam) {
1392       LValue ParLVal =
1393           CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
1394       ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
1395     }
1396 
1397     // Allocate space for the variable to be globalized
1398     llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1399     llvm::Instruction *VoidPtr =
1400         CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1401                                 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1402                             AllocArgs, VD->getName());
1403 
1404     // Cast the void pointer and get the address of the globalized variable.
1405     llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
1406     llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1407         VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
1408     LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
1409     Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
1410     Rec.second.GlobalizedVal = VoidPtr;
1411 
1412     // Assign the local allocation to the newly globalized location.
1413     if (EscapedParam) {
1414       CGF.EmitStoreOfScalar(ParValue, VarAddr);
1415       I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
1416     }
1417     if (auto *DI = CGF.getDebugInfo())
1418       VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
1419   }
1420   for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
1421     // Use actual memory size of the VLA object including the padding
1422     // for alignment purposes.
1423     llvm::Value *Size = CGF.getTypeSize(VD->getType());
1424     CharUnits Align = CGM.getContext().getDeclAlign(VD);
1425     Size = Bld.CreateNUWAdd(
1426         Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
1427     llvm::Value *AlignVal =
1428         llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
1429 
1430     Size = Bld.CreateUDiv(Size, AlignVal);
1431     Size = Bld.CreateNUWMul(Size, AlignVal);
1432 
1433     // Allocate space for this VLA object to be globalized.
1434     llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
1435     llvm::Instruction *VoidPtr =
1436         CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1437                                 CGM.getModule(), OMPRTL___kmpc_alloc_shared),
1438                             AllocArgs, VD->getName());
1439 
1440     I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(VoidPtr);
1441     LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
1442                                      CGM.getContext().getDeclAlign(VD),
1443                                      AlignmentSource::Decl);
1444     I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
1445                                             Base.getAddress(CGF));
1446   }
1447   I->getSecond().MappedParams->apply(CGF);
1448 }
1449 
1450 void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
1451                                                  bool WithSPMDCheck) {
1452   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
1453       getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
1454     return;
1455 
1456   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
1457   if (I != FunctionGlobalizedDecls.end()) {
1458     // Deallocate the memory for each globalized VLA object
1459     for (llvm::Value *Addr :
1460          llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
1461       CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1462                               CGM.getModule(), OMPRTL___kmpc_free_shared),
1463                           Addr);
1464     }
1465     // Deallocate the memory for each globalized value
1466     for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
1467       I->getSecond().MappedParams->restore(CGF);
1468 
1469       CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1470                               CGM.getModule(), OMPRTL___kmpc_free_shared),
1471                           {Rec.second.GlobalizedVal});
1472     }
1473   }
1474 }
1475 
1476 void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
1477                                          const OMPExecutableDirective &D,
1478                                          SourceLocation Loc,
1479                                          llvm::Function *OutlinedFn,
1480                                          ArrayRef<llvm::Value *> CapturedVars) {
1481   if (!CGF.HaveInsertPoint())
1482     return;
1483 
1484   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
1485                                                       /*Name=*/".zero.addr");
1486   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
1487   llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
1488   OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
1489   OutlinedFnArgs.push_back(ZeroAddr.getPointer());
1490   OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1491   emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
1492 }
1493 
1494 void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
1495                                           SourceLocation Loc,
1496                                           llvm::Function *OutlinedFn,
1497                                           ArrayRef<llvm::Value *> CapturedVars,
1498                                           const Expr *IfCond) {
1499   if (!CGF.HaveInsertPoint())
1500     return;
1501 
1502   auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
1503                         IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
1504     CGBuilderTy &Bld = CGF.Builder;
1505     llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
1506     llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
1507     if (WFn)
1508       ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
1509     llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
1510 
1511     // Create a private scope that will globalize the arguments
1512     // passed from the outside of the target region.
1513     // TODO: Is that needed?
1514     CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
1515 
1516     Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
1517         llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
1518         "captured_vars_addrs");
1519     // There's something to share.
1520     if (!CapturedVars.empty()) {
1521       // Prepare for parallel region. Indicate the outlined function.
1522       ASTContext &Ctx = CGF.getContext();
1523       unsigned Idx = 0;
1524       for (llvm::Value *V : CapturedVars) {
1525         Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
1526         llvm::Value *PtrV;
1527         if (V->getType()->isIntegerTy())
1528           PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
1529         else
1530           PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
1531         CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
1532                               Ctx.getPointerType(Ctx.VoidPtrTy));
1533         ++Idx;
1534       }
1535     }
1536 
1537     llvm::Value *IfCondVal = nullptr;
1538     if (IfCond)
1539       IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
1540                                     /* isSigned */ false);
1541     else
1542       IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
1543 
1544     assert(IfCondVal && "Expected a value");
1545     llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
1546     llvm::Value *Args[] = {
1547         RTLoc,
1548         getThreadID(CGF, Loc),
1549         IfCondVal,
1550         llvm::ConstantInt::get(CGF.Int32Ty, -1),
1551         llvm::ConstantInt::get(CGF.Int32Ty, -1),
1552         FnPtr,
1553         ID,
1554         Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
1555                                    CGF.VoidPtrPtrTy),
1556         llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
1557     CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1558                             CGM.getModule(), OMPRTL___kmpc_parallel_51),
1559                         Args);
1560   };
1561 
1562   RegionCodeGenTy RCG(ParallelGen);
1563   RCG(CGF);
1564 }
1565 
1566 void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
1567   // Always emit simple barriers!
1568   if (!CGF.HaveInsertPoint())
1569     return;
1570   // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
1571   // This function does not use parameters, so we can emit just default values.
1572   llvm::Value *Args[] = {
1573       llvm::ConstantPointerNull::get(
1574           cast<llvm::PointerType>(getIdentTyPointerTy())),
1575       llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
1576   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1577                           CGM.getModule(), OMPRTL___kmpc_barrier_simple_spmd),
1578                       Args);
1579 }
1580 
1581 void CGOpenMPRuntimeGPU::emitBarrierCall(CodeGenFunction &CGF,
1582                                            SourceLocation Loc,
1583                                            OpenMPDirectiveKind Kind, bool,
1584                                            bool) {
1585   // Always emit simple barriers!
1586   if (!CGF.HaveInsertPoint())
1587     return;
1588   // Build call __kmpc_cancel_barrier(loc, thread_id);
1589   unsigned Flags = getDefaultFlagsForBarriers(Kind);
1590   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
1591                          getThreadID(CGF, Loc)};
1592 
1593   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1594                           CGM.getModule(), OMPRTL___kmpc_barrier),
1595                       Args);
1596 }
1597 
1598 void CGOpenMPRuntimeGPU::emitCriticalRegion(
1599     CodeGenFunction &CGF, StringRef CriticalName,
1600     const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
1601     const Expr *Hint) {
1602   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
1603   llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
1604   llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
1605   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
1606   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
1607 
1608   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
1609 
1610   // Get the mask of active threads in the warp.
1611   llvm::Value *Mask = CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1612       CGM.getModule(), OMPRTL___kmpc_warp_active_thread_mask));
1613   // Fetch team-local id of the thread.
1614   llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
1615 
1616   // Get the width of the team.
1617   llvm::Value *TeamWidth = RT.getGPUNumThreads(CGF);
1618 
1619   // Initialize the counter variable for the loop.
1620   QualType Int32Ty =
1621       CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
1622   Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
1623   LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
1624   CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
1625                         /*isInit=*/true);
1626 
1627   // Block checks if loop counter exceeds upper bound.
1628   CGF.EmitBlock(LoopBB);
1629   llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1630   llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
1631   CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
1632 
1633   // Block tests which single thread should execute region, and which threads
1634   // should go straight to synchronisation point.
1635   CGF.EmitBlock(TestBB);
1636   CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
1637   llvm::Value *CmpThreadToCounter =
1638       CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
1639   CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
1640 
1641   // Block emits the body of the critical region.
1642   CGF.EmitBlock(BodyBB);
1643 
1644   // Output the critical statement.
1645   CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
1646                                       Hint);
1647 
1648   // After the body surrounded by the critical region, the single executing
1649   // thread will jump to the synchronisation point.
1650   // Block waits for all threads in current team to finish then increments the
1651   // counter variable and returns to the loop.
1652   CGF.EmitBlock(SyncBB);
1653   // Reconverge active threads in the warp.
1654   (void)CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
1655                                 CGM.getModule(), OMPRTL___kmpc_syncwarp),
1656                             Mask);
1657 
1658   llvm::Value *IncCounterVal =
1659       CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
1660   CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
1661   CGF.EmitBranch(LoopBB);
1662 
1663   // Block that is reached when  all threads in the team complete the region.
1664   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1665 }
1666 
1667 /// Cast value to the specified type.
1668 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
1669                                     QualType ValTy, QualType CastTy,
1670                                     SourceLocation Loc) {
1671   assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
1672          "Cast type must sized.");
1673   assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
1674          "Val type must sized.");
1675   llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
1676   if (ValTy == CastTy)
1677     return Val;
1678   if (CGF.getContext().getTypeSizeInChars(ValTy) ==
1679       CGF.getContext().getTypeSizeInChars(CastTy))
1680     return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
1681   if (CastTy->isIntegerType() && ValTy->isIntegerType())
1682     return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
1683                                      CastTy->hasSignedIntegerRepresentation());
1684   Address CastItem = CGF.CreateMemTemp(CastTy);
1685   Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1686       CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
1687   CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
1688                         LValueBaseInfo(AlignmentSource::Type),
1689                         TBAAAccessInfo());
1690   return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
1691                               LValueBaseInfo(AlignmentSource::Type),
1692                               TBAAAccessInfo());
1693 }
1694 
1695 /// This function creates calls to one of two shuffle functions to copy
1696 /// variables between lanes in a warp.
1697 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
1698                                                  llvm::Value *Elem,
1699                                                  QualType ElemType,
1700                                                  llvm::Value *Offset,
1701                                                  SourceLocation Loc) {
1702   CodeGenModule &CGM = CGF.CGM;
1703   CGBuilderTy &Bld = CGF.Builder;
1704   CGOpenMPRuntimeGPU &RT =
1705       *(static_cast<CGOpenMPRuntimeGPU *>(&CGM.getOpenMPRuntime()));
1706   llvm::OpenMPIRBuilder &OMPBuilder = RT.getOMPBuilder();
1707 
1708   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1709   assert(Size.getQuantity() <= 8 &&
1710          "Unsupported bitwidth in shuffle instruction.");
1711 
1712   RuntimeFunction ShuffleFn = Size.getQuantity() <= 4
1713                                   ? OMPRTL___kmpc_shuffle_int32
1714                                   : OMPRTL___kmpc_shuffle_int64;
1715 
1716   // Cast all types to 32- or 64-bit values before calling shuffle routines.
1717   QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
1718       Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
1719   llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
1720   llvm::Value *WarpSize =
1721       Bld.CreateIntCast(RT.getGPUWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
1722 
1723   llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
1724       OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), ShuffleFn),
1725       {ElemCast, Offset, WarpSize});
1726 
1727   return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
1728 }
1729 
1730 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
1731                             Address DestAddr, QualType ElemType,
1732                             llvm::Value *Offset, SourceLocation Loc) {
1733   CGBuilderTy &Bld = CGF.Builder;
1734 
1735   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
1736   // Create the loop over the big sized data.
1737   // ptr = (void*)Elem;
1738   // ptrEnd = (void*) Elem + 1;
1739   // Step = 8;
1740   // while (ptr + Step < ptrEnd)
1741   //   shuffle((int64_t)*ptr);
1742   // Step = 4;
1743   // while (ptr + Step < ptrEnd)
1744   //   shuffle((int32_t)*ptr);
1745   // ...
1746   Address ElemPtr = DestAddr;
1747   Address Ptr = SrcAddr;
1748   Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
1749       Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
1750   for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
1751     if (Size < CharUnits::fromQuantity(IntSize))
1752       continue;
1753     QualType IntType = CGF.getContext().getIntTypeForBitwidth(
1754         CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
1755         /*Signed=*/1);
1756     llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
1757     Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
1758     ElemPtr =
1759         Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
1760     if (Size.getQuantity() / IntSize > 1) {
1761       llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
1762       llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
1763       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
1764       llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
1765       CGF.EmitBlock(PreCondBB);
1766       llvm::PHINode *PhiSrc =
1767           Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
1768       PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
1769       llvm::PHINode *PhiDest =
1770           Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
1771       PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
1772       Ptr = Address(PhiSrc, Ptr.getAlignment());
1773       ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
1774       llvm::Value *PtrDiff = Bld.CreatePtrDiff(
1775           PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
1776                                    Ptr.getPointer(), CGF.VoidPtrTy));
1777       Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
1778                        ThenBB, ExitBB);
1779       CGF.EmitBlock(ThenBB);
1780       llvm::Value *Res = createRuntimeShuffleFunction(
1781           CGF,
1782           CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1783                                LValueBaseInfo(AlignmentSource::Type),
1784                                TBAAAccessInfo()),
1785           IntType, Offset, Loc);
1786       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1787                             LValueBaseInfo(AlignmentSource::Type),
1788                             TBAAAccessInfo());
1789       Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
1790       Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1791       PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
1792       PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
1793       CGF.EmitBranch(PreCondBB);
1794       CGF.EmitBlock(ExitBB);
1795     } else {
1796       llvm::Value *Res = createRuntimeShuffleFunction(
1797           CGF,
1798           CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
1799                                LValueBaseInfo(AlignmentSource::Type),
1800                                TBAAAccessInfo()),
1801           IntType, Offset, Loc);
1802       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
1803                             LValueBaseInfo(AlignmentSource::Type),
1804                             TBAAAccessInfo());
1805       Ptr = Bld.CreateConstGEP(Ptr, 1);
1806       ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
1807     }
1808     Size = Size % IntSize;
1809   }
1810 }
1811 
1812 namespace {
1813 enum CopyAction : unsigned {
1814   // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
1815   // the warp using shuffle instructions.
1816   RemoteLaneToThread,
1817   // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
1818   ThreadCopy,
1819   // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
1820   ThreadToScratchpad,
1821   // ScratchpadToThread: Copy from a scratchpad array in global memory
1822   // containing team-reduced data to a thread's stack.
1823   ScratchpadToThread,
1824 };
1825 } // namespace
1826 
1827 struct CopyOptionsTy {
1828   llvm::Value *RemoteLaneOffset;
1829   llvm::Value *ScratchpadIndex;
1830   llvm::Value *ScratchpadWidth;
1831 };
1832 
1833 /// Emit instructions to copy a Reduce list, which contains partially
1834 /// aggregated values, in the specified direction.
1835 static void emitReductionListCopy(
1836     CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
1837     ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
1838     CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
1839 
1840   CodeGenModule &CGM = CGF.CGM;
1841   ASTContext &C = CGM.getContext();
1842   CGBuilderTy &Bld = CGF.Builder;
1843 
1844   llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
1845   llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
1846   llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
1847 
1848   // Iterates, element-by-element, through the source Reduce list and
1849   // make a copy.
1850   unsigned Idx = 0;
1851   unsigned Size = Privates.size();
1852   for (const Expr *Private : Privates) {
1853     Address SrcElementAddr = Address::invalid();
1854     Address DestElementAddr = Address::invalid();
1855     Address DestElementPtrAddr = Address::invalid();
1856     // Should we shuffle in an element from a remote lane?
1857     bool ShuffleInElement = false;
1858     // Set to true to update the pointer in the dest Reduce list to a
1859     // newly created element.
1860     bool UpdateDestListPtr = false;
1861     // Increment the src or dest pointer to the scratchpad, for each
1862     // new element.
1863     bool IncrScratchpadSrc = false;
1864     bool IncrScratchpadDest = false;
1865 
1866     switch (Action) {
1867     case RemoteLaneToThread: {
1868       // Step 1.1: Get the address for the src element in the Reduce list.
1869       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1870       SrcElementAddr = CGF.EmitLoadOfPointer(
1871           SrcElementPtrAddr,
1872           C.getPointerType(Private->getType())->castAs<PointerType>());
1873 
1874       // Step 1.2: Create a temporary to store the element in the destination
1875       // Reduce list.
1876       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1877       DestElementAddr =
1878           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1879       ShuffleInElement = true;
1880       UpdateDestListPtr = true;
1881       break;
1882     }
1883     case ThreadCopy: {
1884       // Step 1.1: Get the address for the src element in the Reduce list.
1885       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1886       SrcElementAddr = CGF.EmitLoadOfPointer(
1887           SrcElementPtrAddr,
1888           C.getPointerType(Private->getType())->castAs<PointerType>());
1889 
1890       // Step 1.2: Get the address for dest element.  The destination
1891       // element has already been created on the thread's stack.
1892       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1893       DestElementAddr = CGF.EmitLoadOfPointer(
1894           DestElementPtrAddr,
1895           C.getPointerType(Private->getType())->castAs<PointerType>());
1896       break;
1897     }
1898     case ThreadToScratchpad: {
1899       // Step 1.1: Get the address for the src element in the Reduce list.
1900       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
1901       SrcElementAddr = CGF.EmitLoadOfPointer(
1902           SrcElementPtrAddr,
1903           C.getPointerType(Private->getType())->castAs<PointerType>());
1904 
1905       // Step 1.2: Get the address for dest element:
1906       // address = base + index * ElementSizeInChars.
1907       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1908       llvm::Value *CurrentOffset =
1909           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1910       llvm::Value *ScratchPadElemAbsolutePtrVal =
1911           Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
1912       ScratchPadElemAbsolutePtrVal =
1913           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1914       DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
1915                                 C.getTypeAlignInChars(Private->getType()));
1916       IncrScratchpadDest = true;
1917       break;
1918     }
1919     case ScratchpadToThread: {
1920       // Step 1.1: Get the address for the src element in the scratchpad.
1921       // address = base + index * ElementSizeInChars.
1922       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
1923       llvm::Value *CurrentOffset =
1924           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
1925       llvm::Value *ScratchPadElemAbsolutePtrVal =
1926           Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
1927       ScratchPadElemAbsolutePtrVal =
1928           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
1929       SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
1930                                C.getTypeAlignInChars(Private->getType()));
1931       IncrScratchpadSrc = true;
1932 
1933       // Step 1.2: Create a temporary to store the element in the destination
1934       // Reduce list.
1935       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
1936       DestElementAddr =
1937           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
1938       UpdateDestListPtr = true;
1939       break;
1940     }
1941     }
1942 
1943     // Regardless of src and dest of copy, we emit the load of src
1944     // element as this is required in all directions
1945     SrcElementAddr = Bld.CreateElementBitCast(
1946         SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
1947     DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
1948                                                SrcElementAddr.getElementType());
1949 
1950     // Now that all active lanes have read the element in the
1951     // Reduce list, shuffle over the value from the remote lane.
1952     if (ShuffleInElement) {
1953       shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
1954                       RemoteLaneOffset, Private->getExprLoc());
1955     } else {
1956       switch (CGF.getEvaluationKind(Private->getType())) {
1957       case TEK_Scalar: {
1958         llvm::Value *Elem = CGF.EmitLoadOfScalar(
1959             SrcElementAddr, /*Volatile=*/false, Private->getType(),
1960             Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type),
1961             TBAAAccessInfo());
1962         // Store the source element value to the dest element address.
1963         CGF.EmitStoreOfScalar(
1964             Elem, DestElementAddr, /*Volatile=*/false, Private->getType(),
1965             LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
1966         break;
1967       }
1968       case TEK_Complex: {
1969         CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
1970             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1971             Private->getExprLoc());
1972         CGF.EmitStoreOfComplex(
1973             Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1974             /*isInit=*/false);
1975         break;
1976       }
1977       case TEK_Aggregate:
1978         CGF.EmitAggregateCopy(
1979             CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
1980             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
1981             Private->getType(), AggValueSlot::DoesNotOverlap);
1982         break;
1983       }
1984     }
1985 
1986     // Step 3.1: Modify reference in dest Reduce list as needed.
1987     // Modifying the reference in Reduce list to point to the newly
1988     // created element.  The element is live in the current function
1989     // scope and that of functions it invokes (i.e., reduce_function).
1990     // RemoteReduceData[i] = (void*)&RemoteElem
1991     if (UpdateDestListPtr) {
1992       CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
1993                                 DestElementAddr.getPointer(), CGF.VoidPtrTy),
1994                             DestElementPtrAddr, /*Volatile=*/false,
1995                             C.VoidPtrTy);
1996     }
1997 
1998     // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
1999     // address of the next element in scratchpad memory, unless we're currently
2000     // processing the last one.  Memory alignment is also taken care of here.
2001     if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
2002       llvm::Value *ScratchpadBasePtr =
2003           IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
2004       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
2005       ScratchpadBasePtr = Bld.CreateNUWAdd(
2006           ScratchpadBasePtr,
2007           Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
2008 
2009       // Take care of global memory alignment for performance
2010       ScratchpadBasePtr = Bld.CreateNUWSub(
2011           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2012       ScratchpadBasePtr = Bld.CreateUDiv(
2013           ScratchpadBasePtr,
2014           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2015       ScratchpadBasePtr = Bld.CreateNUWAdd(
2016           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
2017       ScratchpadBasePtr = Bld.CreateNUWMul(
2018           ScratchpadBasePtr,
2019           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
2020 
2021       if (IncrScratchpadDest)
2022         DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2023       else /* IncrScratchpadSrc = true */
2024         SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
2025     }
2026 
2027     ++Idx;
2028   }
2029 }
2030 
2031 /// This function emits a helper that gathers Reduce lists from the first
2032 /// lane of every active warp to lanes in the first warp.
2033 ///
2034 /// void inter_warp_copy_func(void* reduce_data, num_warps)
2035 ///   shared smem[warp_size];
2036 ///   For all data entries D in reduce_data:
2037 ///     sync
2038 ///     If (I am the first lane in each warp)
2039 ///       Copy my local D to smem[warp_id]
2040 ///     sync
2041 ///     if (I am the first warp)
2042 ///       Copy smem[thread_id] to my local D
2043 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
2044                                               ArrayRef<const Expr *> Privates,
2045                                               QualType ReductionArrayTy,
2046                                               SourceLocation Loc) {
2047   ASTContext &C = CGM.getContext();
2048   llvm::Module &M = CGM.getModule();
2049 
2050   // ReduceList: thread local Reduce list.
2051   // At the stage of the computation when this function is called, partially
2052   // aggregated values reside in the first lane of every active warp.
2053   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2054                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2055   // NumWarps: number of warps active in the parallel region.  This could
2056   // be smaller than 32 (max warps in a CTA) for partial block reduction.
2057   ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2058                                 C.getIntTypeForBitwidth(32, /* Signed */ true),
2059                                 ImplicitParamDecl::Other);
2060   FunctionArgList Args;
2061   Args.push_back(&ReduceListArg);
2062   Args.push_back(&NumWarpsArg);
2063 
2064   const CGFunctionInfo &CGFI =
2065       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2066   auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
2067                                     llvm::GlobalValue::InternalLinkage,
2068                                     "_omp_reduction_inter_warp_copy_func", &M);
2069   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2070   Fn->setDoesNotRecurse();
2071   CodeGenFunction CGF(CGM);
2072   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2073 
2074   CGBuilderTy &Bld = CGF.Builder;
2075 
2076   // This array is used as a medium to transfer, one reduce element at a time,
2077   // the data from the first lane of every warp to lanes in the first warp
2078   // in order to perform the final step of a reduction in a parallel region
2079   // (reduction across warps).  The array is placed in NVPTX __shared__ memory
2080   // for reduced latency, as well as to have a distinct copy for concurrently
2081   // executing target regions.  The array is declared with common linkage so
2082   // as to be shared across compilation units.
2083   StringRef TransferMediumName =
2084       "__openmp_nvptx_data_transfer_temporary_storage";
2085   llvm::GlobalVariable *TransferMedium =
2086       M.getGlobalVariable(TransferMediumName);
2087   unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
2088   if (!TransferMedium) {
2089     auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
2090     unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
2091     TransferMedium = new llvm::GlobalVariable(
2092         M, Ty, /*isConstant=*/false, llvm::GlobalVariable::WeakAnyLinkage,
2093         llvm::UndefValue::get(Ty), TransferMediumName,
2094         /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
2095         SharedAddressSpace);
2096     CGM.addCompilerUsedGlobal(TransferMedium);
2097   }
2098 
2099   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
2100   // Get the CUDA thread id of the current OpenMP thread on the GPU.
2101   llvm::Value *ThreadID = RT.getGPUThreadID(CGF);
2102   // nvptx_lane_id = nvptx_id % warpsize
2103   llvm::Value *LaneID = getNVPTXLaneID(CGF);
2104   // nvptx_warp_id = nvptx_id / warpsize
2105   llvm::Value *WarpID = getNVPTXWarpID(CGF);
2106 
2107   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2108   Address LocalReduceList(
2109       Bld.CreatePointerBitCastOrAddrSpaceCast(
2110           CGF.EmitLoadOfScalar(
2111               AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
2112               LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
2113           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2114       CGF.getPointerAlign());
2115 
2116   unsigned Idx = 0;
2117   for (const Expr *Private : Privates) {
2118     //
2119     // Warp master copies reduce element to transfer medium in __shared__
2120     // memory.
2121     //
2122     unsigned RealTySize =
2123         C.getTypeSizeInChars(Private->getType())
2124             .alignTo(C.getTypeAlignInChars(Private->getType()))
2125             .getQuantity();
2126     for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) {
2127       unsigned NumIters = RealTySize / TySize;
2128       if (NumIters == 0)
2129         continue;
2130       QualType CType = C.getIntTypeForBitwidth(
2131           C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
2132       llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
2133       CharUnits Align = CharUnits::fromQuantity(TySize);
2134       llvm::Value *Cnt = nullptr;
2135       Address CntAddr = Address::invalid();
2136       llvm::BasicBlock *PrecondBB = nullptr;
2137       llvm::BasicBlock *ExitBB = nullptr;
2138       if (NumIters > 1) {
2139         CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
2140         CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
2141                               /*Volatile=*/false, C.IntTy);
2142         PrecondBB = CGF.createBasicBlock("precond");
2143         ExitBB = CGF.createBasicBlock("exit");
2144         llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
2145         // There is no need to emit line number for unconditional branch.
2146         (void)ApplyDebugLocation::CreateEmpty(CGF);
2147         CGF.EmitBlock(PrecondBB);
2148         Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
2149         llvm::Value *Cmp =
2150             Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
2151         Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
2152         CGF.EmitBlock(BodyBB);
2153       }
2154       // kmpc_barrier.
2155       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2156                                              /*EmitChecks=*/false,
2157                                              /*ForceSimpleCall=*/true);
2158       llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2159       llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2160       llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2161 
2162       // if (lane_id == 0)
2163       llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
2164       Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
2165       CGF.EmitBlock(ThenBB);
2166 
2167       // Reduce element = LocalReduceList[i]
2168       Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2169       llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2170           ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2171       // elemptr = ((CopyType*)(elemptrptr)) + I
2172       Address ElemPtr = Address(ElemPtrPtr, Align);
2173       ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
2174       if (NumIters > 1) {
2175         ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
2176                           ElemPtr.getAlignment());
2177       }
2178 
2179       // Get pointer to location in transfer medium.
2180       // MediumPtr = &medium[warp_id]
2181       llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
2182           TransferMedium->getValueType(), TransferMedium,
2183           {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
2184       Address MediumPtr(MediumPtrVal, Align);
2185       // Casting to actual data type.
2186       // MediumPtr = (CopyType*)MediumPtrAddr;
2187       MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
2188 
2189       // elem = *elemptr
2190       //*MediumPtr = elem
2191       llvm::Value *Elem = CGF.EmitLoadOfScalar(
2192           ElemPtr, /*Volatile=*/false, CType, Loc,
2193           LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2194       // Store the source element value to the dest element address.
2195       CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType,
2196                             LValueBaseInfo(AlignmentSource::Type),
2197                             TBAAAccessInfo());
2198 
2199       Bld.CreateBr(MergeBB);
2200 
2201       CGF.EmitBlock(ElseBB);
2202       Bld.CreateBr(MergeBB);
2203 
2204       CGF.EmitBlock(MergeBB);
2205 
2206       // kmpc_barrier.
2207       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
2208                                              /*EmitChecks=*/false,
2209                                              /*ForceSimpleCall=*/true);
2210 
2211       //
2212       // Warp 0 copies reduce element from transfer medium.
2213       //
2214       llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
2215       llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
2216       llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
2217 
2218       Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
2219       llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
2220           AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
2221 
2222       // Up to 32 threads in warp 0 are active.
2223       llvm::Value *IsActiveThread =
2224           Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
2225       Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
2226 
2227       CGF.EmitBlock(W0ThenBB);
2228 
2229       // SrcMediumPtr = &medium[tid]
2230       llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
2231           TransferMedium->getValueType(), TransferMedium,
2232           {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
2233       Address SrcMediumPtr(SrcMediumPtrVal, Align);
2234       // SrcMediumVal = *SrcMediumPtr;
2235       SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
2236 
2237       // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
2238       Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2239       llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
2240           TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
2241       Address TargetElemPtr = Address(TargetElemPtrVal, Align);
2242       TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
2243       if (NumIters > 1) {
2244         TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
2245                                 TargetElemPtr.getAlignment());
2246       }
2247 
2248       // *TargetElemPtr = SrcMediumVal;
2249       llvm::Value *SrcMediumValue =
2250           CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
2251       CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
2252                             CType);
2253       Bld.CreateBr(W0MergeBB);
2254 
2255       CGF.EmitBlock(W0ElseBB);
2256       Bld.CreateBr(W0MergeBB);
2257 
2258       CGF.EmitBlock(W0MergeBB);
2259 
2260       if (NumIters > 1) {
2261         Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
2262         CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
2263         CGF.EmitBranch(PrecondBB);
2264         (void)ApplyDebugLocation::CreateEmpty(CGF);
2265         CGF.EmitBlock(ExitBB);
2266       }
2267       RealTySize %= TySize;
2268     }
2269     ++Idx;
2270   }
2271 
2272   CGF.FinishFunction();
2273   return Fn;
2274 }
2275 
2276 /// Emit a helper that reduces data across two OpenMP threads (lanes)
2277 /// in the same warp.  It uses shuffle instructions to copy over data from
2278 /// a remote lane's stack.  The reduction algorithm performed is specified
2279 /// by the fourth parameter.
2280 ///
2281 /// Algorithm Versions.
2282 /// Full Warp Reduce (argument value 0):
2283 ///   This algorithm assumes that all 32 lanes are active and gathers
2284 ///   data from these 32 lanes, producing a single resultant value.
2285 /// Contiguous Partial Warp Reduce (argument value 1):
2286 ///   This algorithm assumes that only a *contiguous* subset of lanes
2287 ///   are active.  This happens for the last warp in a parallel region
2288 ///   when the user specified num_threads is not an integer multiple of
2289 ///   32.  This contiguous subset always starts with the zeroth lane.
2290 /// Partial Warp Reduce (argument value 2):
2291 ///   This algorithm gathers data from any number of lanes at any position.
2292 /// All reduced values are stored in the lowest possible lane.  The set
2293 /// of problems every algorithm addresses is a super set of those
2294 /// addressable by algorithms with a lower version number.  Overhead
2295 /// increases as algorithm version increases.
2296 ///
2297 /// Terminology
2298 /// Reduce element:
2299 ///   Reduce element refers to the individual data field with primitive
2300 ///   data types to be combined and reduced across threads.
2301 /// Reduce list:
2302 ///   Reduce list refers to a collection of local, thread-private
2303 ///   reduce elements.
2304 /// Remote Reduce list:
2305 ///   Remote Reduce list refers to a collection of remote (relative to
2306 ///   the current thread) reduce elements.
2307 ///
2308 /// We distinguish between three states of threads that are important to
2309 /// the implementation of this function.
2310 /// Alive threads:
2311 ///   Threads in a warp executing the SIMT instruction, as distinguished from
2312 ///   threads that are inactive due to divergent control flow.
2313 /// Active threads:
2314 ///   The minimal set of threads that has to be alive upon entry to this
2315 ///   function.  The computation is correct iff active threads are alive.
2316 ///   Some threads are alive but they are not active because they do not
2317 ///   contribute to the computation in any useful manner.  Turning them off
2318 ///   may introduce control flow overheads without any tangible benefits.
2319 /// Effective threads:
2320 ///   In order to comply with the argument requirements of the shuffle
2321 ///   function, we must keep all lanes holding data alive.  But at most
2322 ///   half of them perform value aggregation; we refer to this half of
2323 ///   threads as effective. The other half is simply handing off their
2324 ///   data.
2325 ///
2326 /// Procedure
2327 /// Value shuffle:
2328 ///   In this step active threads transfer data from higher lane positions
2329 ///   in the warp to lower lane positions, creating Remote Reduce list.
2330 /// Value aggregation:
2331 ///   In this step, effective threads combine their thread local Reduce list
2332 ///   with Remote Reduce list and store the result in the thread local
2333 ///   Reduce list.
2334 /// Value copy:
2335 ///   In this step, we deal with the assumption made by algorithm 2
2336 ///   (i.e. contiguity assumption).  When we have an odd number of lanes
2337 ///   active, say 2k+1, only k threads will be effective and therefore k
2338 ///   new values will be produced.  However, the Reduce list owned by the
2339 ///   (2k+1)th thread is ignored in the value aggregation.  Therefore
2340 ///   we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
2341 ///   that the contiguity assumption still holds.
2342 static llvm::Function *emitShuffleAndReduceFunction(
2343     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2344     QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
2345   ASTContext &C = CGM.getContext();
2346 
2347   // Thread local Reduce list used to host the values of data to be reduced.
2348   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2349                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2350   // Current lane id; could be logical.
2351   ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
2352                               ImplicitParamDecl::Other);
2353   // Offset of the remote source lane relative to the current lane.
2354   ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2355                                         C.ShortTy, ImplicitParamDecl::Other);
2356   // Algorithm version.  This is expected to be known at compile time.
2357   ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2358                                C.ShortTy, ImplicitParamDecl::Other);
2359   FunctionArgList Args;
2360   Args.push_back(&ReduceListArg);
2361   Args.push_back(&LaneIDArg);
2362   Args.push_back(&RemoteLaneOffsetArg);
2363   Args.push_back(&AlgoVerArg);
2364 
2365   const CGFunctionInfo &CGFI =
2366       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2367   auto *Fn = llvm::Function::Create(
2368       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2369       "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
2370   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2371   Fn->setDoesNotRecurse();
2372 
2373   CodeGenFunction CGF(CGM);
2374   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2375 
2376   CGBuilderTy &Bld = CGF.Builder;
2377 
2378   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2379   Address LocalReduceList(
2380       Bld.CreatePointerBitCastOrAddrSpaceCast(
2381           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2382                                C.VoidPtrTy, SourceLocation()),
2383           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2384       CGF.getPointerAlign());
2385 
2386   Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
2387   llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
2388       AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2389 
2390   Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
2391   llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
2392       AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2393 
2394   Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
2395   llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
2396       AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
2397 
2398   // Create a local thread-private variable to host the Reduce list
2399   // from a remote lane.
2400   Address RemoteReduceList =
2401       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
2402 
2403   // This loop iterates through the list of reduce elements and copies,
2404   // element by element, from a remote lane in the warp to RemoteReduceList,
2405   // hosted on the thread's stack.
2406   emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
2407                         LocalReduceList, RemoteReduceList,
2408                         {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
2409                          /*ScratchpadIndex=*/nullptr,
2410                          /*ScratchpadWidth=*/nullptr});
2411 
2412   // The actions to be performed on the Remote Reduce list is dependent
2413   // on the algorithm version.
2414   //
2415   //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
2416   //  LaneId % 2 == 0 && Offset > 0):
2417   //    do the reduction value aggregation
2418   //
2419   //  The thread local variable Reduce list is mutated in place to host the
2420   //  reduced data, which is the aggregated value produced from local and
2421   //  remote lanes.
2422   //
2423   //  Note that AlgoVer is expected to be a constant integer known at compile
2424   //  time.
2425   //  When AlgoVer==0, the first conjunction evaluates to true, making
2426   //    the entire predicate true during compile time.
2427   //  When AlgoVer==1, the second conjunction has only the second part to be
2428   //    evaluated during runtime.  Other conjunctions evaluates to false
2429   //    during compile time.
2430   //  When AlgoVer==2, the third conjunction has only the second part to be
2431   //    evaluated during runtime.  Other conjunctions evaluates to false
2432   //    during compile time.
2433   llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
2434 
2435   llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2436   llvm::Value *CondAlgo1 = Bld.CreateAnd(
2437       Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
2438 
2439   llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
2440   llvm::Value *CondAlgo2 = Bld.CreateAnd(
2441       Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
2442   CondAlgo2 = Bld.CreateAnd(
2443       CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
2444 
2445   llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
2446   CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
2447 
2448   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
2449   llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
2450   llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
2451   Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
2452 
2453   CGF.EmitBlock(ThenBB);
2454   // reduce_function(LocalReduceList, RemoteReduceList)
2455   llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2456       LocalReduceList.getPointer(), CGF.VoidPtrTy);
2457   llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2458       RemoteReduceList.getPointer(), CGF.VoidPtrTy);
2459   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2460       CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
2461   Bld.CreateBr(MergeBB);
2462 
2463   CGF.EmitBlock(ElseBB);
2464   Bld.CreateBr(MergeBB);
2465 
2466   CGF.EmitBlock(MergeBB);
2467 
2468   // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
2469   // Reduce list.
2470   Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
2471   llvm::Value *CondCopy = Bld.CreateAnd(
2472       Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
2473 
2474   llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
2475   llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
2476   llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
2477   Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
2478 
2479   CGF.EmitBlock(CpyThenBB);
2480   emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
2481                         RemoteReduceList, LocalReduceList);
2482   Bld.CreateBr(CpyMergeBB);
2483 
2484   CGF.EmitBlock(CpyElseBB);
2485   Bld.CreateBr(CpyMergeBB);
2486 
2487   CGF.EmitBlock(CpyMergeBB);
2488 
2489   CGF.FinishFunction();
2490   return Fn;
2491 }
2492 
2493 /// This function emits a helper that copies all the reduction variables from
2494 /// the team into the provided global buffer for the reduction variables.
2495 ///
2496 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2497 ///   For all data entries D in reduce_data:
2498 ///     Copy local D to buffer.D[Idx]
2499 static llvm::Value *emitListToGlobalCopyFunction(
2500     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2501     QualType ReductionArrayTy, SourceLocation Loc,
2502     const RecordDecl *TeamReductionRec,
2503     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2504         &VarFieldMap) {
2505   ASTContext &C = CGM.getContext();
2506 
2507   // Buffer: global reduction buffer.
2508   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2509                               C.VoidPtrTy, ImplicitParamDecl::Other);
2510   // Idx: index of the buffer.
2511   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2512                            ImplicitParamDecl::Other);
2513   // ReduceList: thread local Reduce list.
2514   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2515                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2516   FunctionArgList Args;
2517   Args.push_back(&BufferArg);
2518   Args.push_back(&IdxArg);
2519   Args.push_back(&ReduceListArg);
2520 
2521   const CGFunctionInfo &CGFI =
2522       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2523   auto *Fn = llvm::Function::Create(
2524       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2525       "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
2526   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2527   Fn->setDoesNotRecurse();
2528   CodeGenFunction CGF(CGM);
2529   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2530 
2531   CGBuilderTy &Bld = CGF.Builder;
2532 
2533   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2534   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2535   Address LocalReduceList(
2536       Bld.CreatePointerBitCastOrAddrSpaceCast(
2537           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2538                                C.VoidPtrTy, Loc),
2539           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2540       CGF.getPointerAlign());
2541   QualType StaticTy = C.getRecordType(TeamReductionRec);
2542   llvm::Type *LLVMReductionsBufferTy =
2543       CGM.getTypes().ConvertTypeForMem(StaticTy);
2544   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2545       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2546       LLVMReductionsBufferTy->getPointerTo());
2547   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2548                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2549                                               /*Volatile=*/false, C.IntTy,
2550                                               Loc)};
2551   unsigned Idx = 0;
2552   for (const Expr *Private : Privates) {
2553     // Reduce element = LocalReduceList[i]
2554     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2555     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2556         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2557     // elemptr = ((CopyType*)(elemptrptr)) + I
2558     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2559         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
2560     Address ElemPtr =
2561         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2562     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2563     // Global = Buffer.VD[Idx];
2564     const FieldDecl *FD = VarFieldMap.lookup(VD);
2565     LValue GlobLVal = CGF.EmitLValueForField(
2566         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2567     Address GlobAddr = GlobLVal.getAddress(CGF);
2568     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2569         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2570     GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
2571     switch (CGF.getEvaluationKind(Private->getType())) {
2572     case TEK_Scalar: {
2573       llvm::Value *V = CGF.EmitLoadOfScalar(
2574           ElemPtr, /*Volatile=*/false, Private->getType(), Loc,
2575           LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo());
2576       CGF.EmitStoreOfScalar(V, GlobLVal);
2577       break;
2578     }
2579     case TEK_Complex: {
2580       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
2581           CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
2582       CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
2583       break;
2584     }
2585     case TEK_Aggregate:
2586       CGF.EmitAggregateCopy(GlobLVal,
2587                             CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2588                             Private->getType(), AggValueSlot::DoesNotOverlap);
2589       break;
2590     }
2591     ++Idx;
2592   }
2593 
2594   CGF.FinishFunction();
2595   return Fn;
2596 }
2597 
2598 /// This function emits a helper that reduces all the reduction variables from
2599 /// the team into the provided global buffer for the reduction variables.
2600 ///
2601 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
2602 ///  void *GlobPtrs[];
2603 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
2604 ///  ...
2605 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
2606 ///  reduce_function(GlobPtrs, reduce_data);
2607 static llvm::Value *emitListToGlobalReduceFunction(
2608     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2609     QualType ReductionArrayTy, SourceLocation Loc,
2610     const RecordDecl *TeamReductionRec,
2611     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2612         &VarFieldMap,
2613     llvm::Function *ReduceFn) {
2614   ASTContext &C = CGM.getContext();
2615 
2616   // Buffer: global reduction buffer.
2617   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2618                               C.VoidPtrTy, ImplicitParamDecl::Other);
2619   // Idx: index of the buffer.
2620   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2621                            ImplicitParamDecl::Other);
2622   // ReduceList: thread local Reduce list.
2623   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2624                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2625   FunctionArgList Args;
2626   Args.push_back(&BufferArg);
2627   Args.push_back(&IdxArg);
2628   Args.push_back(&ReduceListArg);
2629 
2630   const CGFunctionInfo &CGFI =
2631       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2632   auto *Fn = llvm::Function::Create(
2633       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2634       "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
2635   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2636   Fn->setDoesNotRecurse();
2637   CodeGenFunction CGF(CGM);
2638   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2639 
2640   CGBuilderTy &Bld = CGF.Builder;
2641 
2642   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2643   QualType StaticTy = C.getRecordType(TeamReductionRec);
2644   llvm::Type *LLVMReductionsBufferTy =
2645       CGM.getTypes().ConvertTypeForMem(StaticTy);
2646   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2647       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2648       LLVMReductionsBufferTy->getPointerTo());
2649 
2650   // 1. Build a list of reduction variables.
2651   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2652   Address ReductionList =
2653       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2654   auto IPriv = Privates.begin();
2655   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2656                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2657                                               /*Volatile=*/false, C.IntTy,
2658                                               Loc)};
2659   unsigned Idx = 0;
2660   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2661     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2662     // Global = Buffer.VD[Idx];
2663     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2664     const FieldDecl *FD = VarFieldMap.lookup(VD);
2665     LValue GlobLVal = CGF.EmitLValueForField(
2666         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2667     Address GlobAddr = GlobLVal.getAddress(CGF);
2668     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2669         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2670     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2671     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2672     if ((*IPriv)->getType()->isVariablyModifiedType()) {
2673       // Store array size.
2674       ++Idx;
2675       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2676       llvm::Value *Size = CGF.Builder.CreateIntCast(
2677           CGF.getVLASize(
2678                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2679               .NumElts,
2680           CGF.SizeTy, /*isSigned=*/false);
2681       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2682                               Elem);
2683     }
2684   }
2685 
2686   // Call reduce_function(GlobalReduceList, ReduceList)
2687   llvm::Value *GlobalReduceList =
2688       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2689   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2690   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2691       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2692   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2693       CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
2694   CGF.FinishFunction();
2695   return Fn;
2696 }
2697 
2698 /// This function emits a helper that copies all the reduction variables from
2699 /// the team into the provided global buffer for the reduction variables.
2700 ///
2701 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
2702 ///   For all data entries D in reduce_data:
2703 ///     Copy buffer.D[Idx] to local D;
2704 static llvm::Value *emitGlobalToListCopyFunction(
2705     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2706     QualType ReductionArrayTy, SourceLocation Loc,
2707     const RecordDecl *TeamReductionRec,
2708     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2709         &VarFieldMap) {
2710   ASTContext &C = CGM.getContext();
2711 
2712   // Buffer: global reduction buffer.
2713   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2714                               C.VoidPtrTy, ImplicitParamDecl::Other);
2715   // Idx: index of the buffer.
2716   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2717                            ImplicitParamDecl::Other);
2718   // ReduceList: thread local Reduce list.
2719   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2720                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2721   FunctionArgList Args;
2722   Args.push_back(&BufferArg);
2723   Args.push_back(&IdxArg);
2724   Args.push_back(&ReduceListArg);
2725 
2726   const CGFunctionInfo &CGFI =
2727       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2728   auto *Fn = llvm::Function::Create(
2729       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2730       "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
2731   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2732   Fn->setDoesNotRecurse();
2733   CodeGenFunction CGF(CGM);
2734   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2735 
2736   CGBuilderTy &Bld = CGF.Builder;
2737 
2738   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2739   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2740   Address LocalReduceList(
2741       Bld.CreatePointerBitCastOrAddrSpaceCast(
2742           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
2743                                C.VoidPtrTy, Loc),
2744           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
2745       CGF.getPointerAlign());
2746   QualType StaticTy = C.getRecordType(TeamReductionRec);
2747   llvm::Type *LLVMReductionsBufferTy =
2748       CGM.getTypes().ConvertTypeForMem(StaticTy);
2749   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2750       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2751       LLVMReductionsBufferTy->getPointerTo());
2752 
2753   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2754                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2755                                               /*Volatile=*/false, C.IntTy,
2756                                               Loc)};
2757   unsigned Idx = 0;
2758   for (const Expr *Private : Privates) {
2759     // Reduce element = LocalReduceList[i]
2760     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
2761     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
2762         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
2763     // elemptr = ((CopyType*)(elemptrptr)) + I
2764     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2765         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
2766     Address ElemPtr =
2767         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
2768     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
2769     // Global = Buffer.VD[Idx];
2770     const FieldDecl *FD = VarFieldMap.lookup(VD);
2771     LValue GlobLVal = CGF.EmitLValueForField(
2772         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2773     Address GlobAddr = GlobLVal.getAddress(CGF);
2774     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2775         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2776     GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
2777     switch (CGF.getEvaluationKind(Private->getType())) {
2778     case TEK_Scalar: {
2779       llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
2780       CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(),
2781                             LValueBaseInfo(AlignmentSource::Type),
2782                             TBAAAccessInfo());
2783       break;
2784     }
2785     case TEK_Complex: {
2786       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
2787       CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2788                              /*isInit=*/false);
2789       break;
2790     }
2791     case TEK_Aggregate:
2792       CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
2793                             GlobLVal, Private->getType(),
2794                             AggValueSlot::DoesNotOverlap);
2795       break;
2796     }
2797     ++Idx;
2798   }
2799 
2800   CGF.FinishFunction();
2801   return Fn;
2802 }
2803 
2804 /// This function emits a helper that reduces all the reduction variables from
2805 /// the team into the provided global buffer for the reduction variables.
2806 ///
2807 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
2808 ///  void *GlobPtrs[];
2809 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
2810 ///  ...
2811 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
2812 ///  reduce_function(reduce_data, GlobPtrs);
2813 static llvm::Value *emitGlobalToListReduceFunction(
2814     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
2815     QualType ReductionArrayTy, SourceLocation Loc,
2816     const RecordDecl *TeamReductionRec,
2817     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2818         &VarFieldMap,
2819     llvm::Function *ReduceFn) {
2820   ASTContext &C = CGM.getContext();
2821 
2822   // Buffer: global reduction buffer.
2823   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2824                               C.VoidPtrTy, ImplicitParamDecl::Other);
2825   // Idx: index of the buffer.
2826   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
2827                            ImplicitParamDecl::Other);
2828   // ReduceList: thread local Reduce list.
2829   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
2830                                   C.VoidPtrTy, ImplicitParamDecl::Other);
2831   FunctionArgList Args;
2832   Args.push_back(&BufferArg);
2833   Args.push_back(&IdxArg);
2834   Args.push_back(&ReduceListArg);
2835 
2836   const CGFunctionInfo &CGFI =
2837       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
2838   auto *Fn = llvm::Function::Create(
2839       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2840       "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
2841   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
2842   Fn->setDoesNotRecurse();
2843   CodeGenFunction CGF(CGM);
2844   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
2845 
2846   CGBuilderTy &Bld = CGF.Builder;
2847 
2848   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
2849   QualType StaticTy = C.getRecordType(TeamReductionRec);
2850   llvm::Type *LLVMReductionsBufferTy =
2851       CGM.getTypes().ConvertTypeForMem(StaticTy);
2852   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2853       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
2854       LLVMReductionsBufferTy->getPointerTo());
2855 
2856   // 1. Build a list of reduction variables.
2857   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2858   Address ReductionList =
2859       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2860   auto IPriv = Privates.begin();
2861   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
2862                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
2863                                               /*Volatile=*/false, C.IntTy,
2864                                               Loc)};
2865   unsigned Idx = 0;
2866   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
2867     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2868     // Global = Buffer.VD[Idx];
2869     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
2870     const FieldDecl *FD = VarFieldMap.lookup(VD);
2871     LValue GlobLVal = CGF.EmitLValueForField(
2872         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
2873     Address GlobAddr = GlobLVal.getAddress(CGF);
2874     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
2875         GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
2876     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
2877     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
2878     if ((*IPriv)->getType()->isVariablyModifiedType()) {
2879       // Store array size.
2880       ++Idx;
2881       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
2882       llvm::Value *Size = CGF.Builder.CreateIntCast(
2883           CGF.getVLASize(
2884                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2885               .NumElts,
2886           CGF.SizeTy, /*isSigned=*/false);
2887       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2888                               Elem);
2889     }
2890   }
2891 
2892   // Call reduce_function(ReduceList, GlobalReduceList)
2893   llvm::Value *GlobalReduceList =
2894       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
2895   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
2896   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
2897       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
2898   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
2899       CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
2900   CGF.FinishFunction();
2901   return Fn;
2902 }
2903 
2904 ///
2905 /// Design of OpenMP reductions on the GPU
2906 ///
2907 /// Consider a typical OpenMP program with one or more reduction
2908 /// clauses:
2909 ///
2910 /// float foo;
2911 /// double bar;
2912 /// #pragma omp target teams distribute parallel for \
2913 ///             reduction(+:foo) reduction(*:bar)
2914 /// for (int i = 0; i < N; i++) {
2915 ///   foo += A[i]; bar *= B[i];
2916 /// }
2917 ///
2918 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
2919 /// all teams.  In our OpenMP implementation on the NVPTX device an
2920 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
2921 /// within a team are mapped to CUDA threads within a threadblock.
2922 /// Our goal is to efficiently aggregate values across all OpenMP
2923 /// threads such that:
2924 ///
2925 ///   - the compiler and runtime are logically concise, and
2926 ///   - the reduction is performed efficiently in a hierarchical
2927 ///     manner as follows: within OpenMP threads in the same warp,
2928 ///     across warps in a threadblock, and finally across teams on
2929 ///     the NVPTX device.
2930 ///
2931 /// Introduction to Decoupling
2932 ///
2933 /// We would like to decouple the compiler and the runtime so that the
2934 /// latter is ignorant of the reduction variables (number, data types)
2935 /// and the reduction operators.  This allows a simpler interface
2936 /// and implementation while still attaining good performance.
2937 ///
2938 /// Pseudocode for the aforementioned OpenMP program generated by the
2939 /// compiler is as follows:
2940 ///
2941 /// 1. Create private copies of reduction variables on each OpenMP
2942 ///    thread: 'foo_private', 'bar_private'
2943 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
2944 ///    to it and writes the result in 'foo_private' and 'bar_private'
2945 ///    respectively.
2946 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
2947 ///    and store the result on the team master:
2948 ///
2949 ///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
2950 ///        reduceData, shuffleReduceFn, interWarpCpyFn)
2951 ///
2952 ///     where:
2953 ///       struct ReduceData {
2954 ///         double *foo;
2955 ///         double *bar;
2956 ///       } reduceData
2957 ///       reduceData.foo = &foo_private
2958 ///       reduceData.bar = &bar_private
2959 ///
2960 ///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
2961 ///     auxiliary functions generated by the compiler that operate on
2962 ///     variables of type 'ReduceData'.  They aid the runtime perform
2963 ///     algorithmic steps in a data agnostic manner.
2964 ///
2965 ///     'shuffleReduceFn' is a pointer to a function that reduces data
2966 ///     of type 'ReduceData' across two OpenMP threads (lanes) in the
2967 ///     same warp.  It takes the following arguments as input:
2968 ///
2969 ///     a. variable of type 'ReduceData' on the calling lane,
2970 ///     b. its lane_id,
2971 ///     c. an offset relative to the current lane_id to generate a
2972 ///        remote_lane_id.  The remote lane contains the second
2973 ///        variable of type 'ReduceData' that is to be reduced.
2974 ///     d. an algorithm version parameter determining which reduction
2975 ///        algorithm to use.
2976 ///
2977 ///     'shuffleReduceFn' retrieves data from the remote lane using
2978 ///     efficient GPU shuffle intrinsics and reduces, using the
2979 ///     algorithm specified by the 4th parameter, the two operands
2980 ///     element-wise.  The result is written to the first operand.
2981 ///
2982 ///     Different reduction algorithms are implemented in different
2983 ///     runtime functions, all calling 'shuffleReduceFn' to perform
2984 ///     the essential reduction step.  Therefore, based on the 4th
2985 ///     parameter, this function behaves slightly differently to
2986 ///     cooperate with the runtime to ensure correctness under
2987 ///     different circumstances.
2988 ///
2989 ///     'InterWarpCpyFn' is a pointer to a function that transfers
2990 ///     reduced variables across warps.  It tunnels, through CUDA
2991 ///     shared memory, the thread-private data of type 'ReduceData'
2992 ///     from lane 0 of each warp to a lane in the first warp.
2993 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
2994 ///    The last team writes the global reduced value to memory.
2995 ///
2996 ///     ret = __kmpc_nvptx_teams_reduce_nowait(...,
2997 ///             reduceData, shuffleReduceFn, interWarpCpyFn,
2998 ///             scratchpadCopyFn, loadAndReduceFn)
2999 ///
3000 ///     'scratchpadCopyFn' is a helper that stores reduced
3001 ///     data from the team master to a scratchpad array in
3002 ///     global memory.
3003 ///
3004 ///     'loadAndReduceFn' is a helper that loads data from
3005 ///     the scratchpad array and reduces it with the input
3006 ///     operand.
3007 ///
3008 ///     These compiler generated functions hide address
3009 ///     calculation and alignment information from the runtime.
3010 /// 5. if ret == 1:
3011 ///     The team master of the last team stores the reduced
3012 ///     result to the globals in memory.
3013 ///     foo += reduceData.foo; bar *= reduceData.bar
3014 ///
3015 ///
3016 /// Warp Reduction Algorithms
3017 ///
3018 /// On the warp level, we have three algorithms implemented in the
3019 /// OpenMP runtime depending on the number of active lanes:
3020 ///
3021 /// Full Warp Reduction
3022 ///
3023 /// The reduce algorithm within a warp where all lanes are active
3024 /// is implemented in the runtime as follows:
3025 ///
3026 /// full_warp_reduce(void *reduce_data,
3027 ///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3028 ///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
3029 ///     ShuffleReduceFn(reduce_data, 0, offset, 0);
3030 /// }
3031 ///
3032 /// The algorithm completes in log(2, WARPSIZE) steps.
3033 ///
3034 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
3035 /// not used therefore we save instructions by not retrieving lane_id
3036 /// from the corresponding special registers.  The 4th parameter, which
3037 /// represents the version of the algorithm being used, is set to 0 to
3038 /// signify full warp reduction.
3039 ///
3040 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3041 ///
3042 /// #reduce_elem refers to an element in the local lane's data structure
3043 /// #remote_elem is retrieved from a remote lane
3044 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3045 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
3046 ///
3047 /// Contiguous Partial Warp Reduction
3048 ///
3049 /// This reduce algorithm is used within a warp where only the first
3050 /// 'n' (n <= WARPSIZE) lanes are active.  It is typically used when the
3051 /// number of OpenMP threads in a parallel region is not a multiple of
3052 /// WARPSIZE.  The algorithm is implemented in the runtime as follows:
3053 ///
3054 /// void
3055 /// contiguous_partial_reduce(void *reduce_data,
3056 ///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
3057 ///                           int size, int lane_id) {
3058 ///   int curr_size;
3059 ///   int offset;
3060 ///   curr_size = size;
3061 ///   mask = curr_size/2;
3062 ///   while (offset>0) {
3063 ///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
3064 ///     curr_size = (curr_size+1)/2;
3065 ///     offset = curr_size/2;
3066 ///   }
3067 /// }
3068 ///
3069 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3070 ///
3071 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3072 /// if (lane_id < offset)
3073 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3074 /// else
3075 ///     reduce_elem = remote_elem
3076 ///
3077 /// This algorithm assumes that the data to be reduced are located in a
3078 /// contiguous subset of lanes starting from the first.  When there is
3079 /// an odd number of active lanes, the data in the last lane is not
3080 /// aggregated with any other lane's dat but is instead copied over.
3081 ///
3082 /// Dispersed Partial Warp Reduction
3083 ///
3084 /// This algorithm is used within a warp when any discontiguous subset of
3085 /// lanes are active.  It is used to implement the reduction operation
3086 /// across lanes in an OpenMP simd region or in a nested parallel region.
3087 ///
3088 /// void
3089 /// dispersed_partial_reduce(void *reduce_data,
3090 ///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
3091 ///   int size, remote_id;
3092 ///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
3093 ///   do {
3094 ///       remote_id = next_active_lane_id_right_after_me();
3095 ///       # the above function returns 0 of no active lane
3096 ///       # is present right after the current lane.
3097 ///       size = number_of_active_lanes_in_this_warp();
3098 ///       logical_lane_id /= 2;
3099 ///       ShuffleReduceFn(reduce_data, logical_lane_id,
3100 ///                       remote_id-1-threadIdx.x, 2);
3101 ///   } while (logical_lane_id % 2 == 0 && size > 1);
3102 /// }
3103 ///
3104 /// There is no assumption made about the initial state of the reduction.
3105 /// Any number of lanes (>=1) could be active at any position.  The reduction
3106 /// result is returned in the first active lane.
3107 ///
3108 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
3109 ///
3110 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
3111 /// if (lane_id % 2 == 0 && offset > 0)
3112 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
3113 /// else
3114 ///     reduce_elem = remote_elem
3115 ///
3116 ///
3117 /// Intra-Team Reduction
3118 ///
3119 /// This function, as implemented in the runtime call
3120 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
3121 /// threads in a team.  It first reduces within a warp using the
3122 /// aforementioned algorithms.  We then proceed to gather all such
3123 /// reduced values at the first warp.
3124 ///
3125 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
3126 /// data from each of the "warp master" (zeroth lane of each warp, where
3127 /// warp-reduced data is held) to the zeroth warp.  This step reduces (in
3128 /// a mathematical sense) the problem of reduction across warp masters in
3129 /// a block to the problem of warp reduction.
3130 ///
3131 ///
3132 /// Inter-Team Reduction
3133 ///
3134 /// Once a team has reduced its data to a single value, it is stored in
3135 /// a global scratchpad array.  Since each team has a distinct slot, this
3136 /// can be done without locking.
3137 ///
3138 /// The last team to write to the scratchpad array proceeds to reduce the
3139 /// scratchpad array.  One or more workers in the last team use the helper
3140 /// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
3141 /// the k'th worker reduces every k'th element.
3142 ///
3143 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
3144 /// reduce across workers and compute a globally reduced value.
3145 ///
3146 void CGOpenMPRuntimeGPU::emitReduction(
3147     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
3148     ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
3149     ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
3150   if (!CGF.HaveInsertPoint())
3151     return;
3152 
3153   bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
3154 #ifndef NDEBUG
3155   bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
3156 #endif
3157 
3158   if (Options.SimpleReduction) {
3159     assert(!TeamsReduction && !ParallelReduction &&
3160            "Invalid reduction selection in emitReduction.");
3161     CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
3162                                    ReductionOps, Options);
3163     return;
3164   }
3165 
3166   assert((TeamsReduction || ParallelReduction) &&
3167          "Invalid reduction selection in emitReduction.");
3168 
3169   // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
3170   // RedList, shuffle_reduce_func, interwarp_copy_func);
3171   // or
3172   // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
3173   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
3174   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3175 
3176   llvm::Value *Res;
3177   ASTContext &C = CGM.getContext();
3178   // 1. Build a list of reduction variables.
3179   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3180   auto Size = RHSExprs.size();
3181   for (const Expr *E : Privates) {
3182     if (E->getType()->isVariablyModifiedType())
3183       // Reserve place for array size.
3184       ++Size;
3185   }
3186   llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
3187   QualType ReductionArrayTy =
3188       C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
3189                              /*IndexTypeQuals=*/0);
3190   Address ReductionList =
3191       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3192   auto IPriv = Privates.begin();
3193   unsigned Idx = 0;
3194   for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
3195     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3196     CGF.Builder.CreateStore(
3197         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3198             CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
3199         Elem);
3200     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3201       // Store array size.
3202       ++Idx;
3203       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3204       llvm::Value *Size = CGF.Builder.CreateIntCast(
3205           CGF.getVLASize(
3206                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3207               .NumElts,
3208           CGF.SizeTy, /*isSigned=*/false);
3209       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3210                               Elem);
3211     }
3212   }
3213 
3214   llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3215       ReductionList.getPointer(), CGF.VoidPtrTy);
3216   llvm::Function *ReductionFn = emitReductionFunction(
3217       Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
3218       LHSExprs, RHSExprs, ReductionOps);
3219   llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
3220   llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
3221       CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
3222   llvm::Value *InterWarpCopyFn =
3223       emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
3224 
3225   if (ParallelReduction) {
3226     llvm::Value *Args[] = {RTLoc,
3227                            ThreadId,
3228                            CGF.Builder.getInt32(RHSExprs.size()),
3229                            ReductionArrayTySize,
3230                            RL,
3231                            ShuffleAndReduceFn,
3232                            InterWarpCopyFn};
3233 
3234     Res = CGF.EmitRuntimeCall(
3235         OMPBuilder.getOrCreateRuntimeFunction(
3236             CGM.getModule(), OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2),
3237         Args);
3238   } else {
3239     assert(TeamsReduction && "expected teams reduction.");
3240     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
3241     llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
3242     int Cnt = 0;
3243     for (const Expr *DRE : Privates) {
3244       PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
3245       ++Cnt;
3246     }
3247     const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
3248         CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
3249         C.getLangOpts().OpenMPCUDAReductionBufNum);
3250     TeamsReductions.push_back(TeamReductionRec);
3251     if (!KernelTeamsReductionPtr) {
3252       KernelTeamsReductionPtr = new llvm::GlobalVariable(
3253           CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
3254           llvm::GlobalValue::InternalLinkage, nullptr,
3255           "_openmp_teams_reductions_buffer_$_$ptr");
3256     }
3257     llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
3258         Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
3259         /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
3260     llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
3261         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3262     llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
3263         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3264         ReductionFn);
3265     llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
3266         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
3267     llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
3268         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
3269         ReductionFn);
3270 
3271     llvm::Value *Args[] = {
3272         RTLoc,
3273         ThreadId,
3274         GlobalBufferPtr,
3275         CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
3276         RL,
3277         ShuffleAndReduceFn,
3278         InterWarpCopyFn,
3279         GlobalToBufferCpyFn,
3280         GlobalToBufferRedFn,
3281         BufferToGlobalCpyFn,
3282         BufferToGlobalRedFn};
3283 
3284     Res = CGF.EmitRuntimeCall(
3285         OMPBuilder.getOrCreateRuntimeFunction(
3286             CGM.getModule(), OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2),
3287         Args);
3288   }
3289 
3290   // 5. Build if (res == 1)
3291   llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
3292   llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
3293   llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
3294       Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
3295   CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
3296 
3297   // 6. Build then branch: where we have reduced values in the master
3298   //    thread in each team.
3299   //    __kmpc_end_reduce{_nowait}(<gtid>);
3300   //    break;
3301   CGF.EmitBlock(ThenBB);
3302 
3303   // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
3304   auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
3305                     this](CodeGenFunction &CGF, PrePostActionTy &Action) {
3306     auto IPriv = Privates.begin();
3307     auto ILHS = LHSExprs.begin();
3308     auto IRHS = RHSExprs.begin();
3309     for (const Expr *E : ReductionOps) {
3310       emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
3311                                   cast<DeclRefExpr>(*IRHS));
3312       ++IPriv;
3313       ++ILHS;
3314       ++IRHS;
3315     }
3316   };
3317   llvm::Value *EndArgs[] = {ThreadId};
3318   RegionCodeGenTy RCG(CodeGen);
3319   NVPTXActionTy Action(
3320       nullptr, llvm::None,
3321       OMPBuilder.getOrCreateRuntimeFunction(
3322           CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
3323       EndArgs);
3324   RCG.setAction(Action);
3325   RCG(CGF);
3326   // There is no need to emit line number for unconditional branch.
3327   (void)ApplyDebugLocation::CreateEmpty(CGF);
3328   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
3329 }
3330 
3331 const VarDecl *
3332 CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
3333                                        const VarDecl *NativeParam) const {
3334   if (!NativeParam->getType()->isReferenceType())
3335     return NativeParam;
3336   QualType ArgType = NativeParam->getType();
3337   QualifierCollector QC;
3338   const Type *NonQualTy = QC.strip(ArgType);
3339   QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3340   if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
3341     if (Attr->getCaptureKind() == OMPC_map) {
3342       PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
3343                                                         LangAS::opencl_global);
3344     }
3345   }
3346   ArgType = CGM.getContext().getPointerType(PointeeTy);
3347   QC.addRestrict();
3348   enum { NVPTX_local_addr = 5 };
3349   QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
3350   ArgType = QC.apply(CGM.getContext(), ArgType);
3351   if (isa<ImplicitParamDecl>(NativeParam))
3352     return ImplicitParamDecl::Create(
3353         CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
3354         NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
3355   return ParmVarDecl::Create(
3356       CGM.getContext(),
3357       const_cast<DeclContext *>(NativeParam->getDeclContext()),
3358       NativeParam->getBeginLoc(), NativeParam->getLocation(),
3359       NativeParam->getIdentifier(), ArgType,
3360       /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
3361 }
3362 
3363 Address
3364 CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
3365                                           const VarDecl *NativeParam,
3366                                           const VarDecl *TargetParam) const {
3367   assert(NativeParam != TargetParam &&
3368          NativeParam->getType()->isReferenceType() &&
3369          "Native arg must not be the same as target arg.");
3370   Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
3371   QualType NativeParamType = NativeParam->getType();
3372   QualifierCollector QC;
3373   const Type *NonQualTy = QC.strip(NativeParamType);
3374   QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
3375   unsigned NativePointeeAddrSpace =
3376       CGF.getContext().getTargetAddressSpace(NativePointeeTy);
3377   QualType TargetTy = TargetParam->getType();
3378   llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
3379       LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
3380   // First cast to generic.
3381   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3382       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
3383                       /*AddrSpace=*/0));
3384   // Cast from generic to native address space.
3385   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3386       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
3387                       NativePointeeAddrSpace));
3388   Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
3389   CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
3390                         NativeParamType);
3391   return NativeParamAddr;
3392 }
3393 
3394 void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
3395     CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
3396     ArrayRef<llvm::Value *> Args) const {
3397   SmallVector<llvm::Value *, 4> TargetArgs;
3398   TargetArgs.reserve(Args.size());
3399   auto *FnType = OutlinedFn.getFunctionType();
3400   for (unsigned I = 0, E = Args.size(); I < E; ++I) {
3401     if (FnType->isVarArg() && FnType->getNumParams() <= I) {
3402       TargetArgs.append(std::next(Args.begin(), I), Args.end());
3403       break;
3404     }
3405     llvm::Type *TargetType = FnType->getParamType(I);
3406     llvm::Value *NativeArg = Args[I];
3407     if (!TargetType->isPointerTy()) {
3408       TargetArgs.emplace_back(NativeArg);
3409       continue;
3410     }
3411     llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3412         NativeArg,
3413         NativeArg->getType()->getPointerElementType()->getPointerTo());
3414     TargetArgs.emplace_back(
3415         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
3416   }
3417   CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
3418 }
3419 
3420 /// Emit function which wraps the outline parallel region
3421 /// and controls the arguments which are passed to this function.
3422 /// The wrapper ensures that the outlined function is called
3423 /// with the correct arguments when data is shared.
3424 llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
3425     llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
3426   ASTContext &Ctx = CGM.getContext();
3427   const auto &CS = *D.getCapturedStmt(OMPD_parallel);
3428 
3429   // Create a function that takes as argument the source thread.
3430   FunctionArgList WrapperArgs;
3431   QualType Int16QTy =
3432       Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
3433   QualType Int32QTy =
3434       Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
3435   ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3436                                      /*Id=*/nullptr, Int16QTy,
3437                                      ImplicitParamDecl::Other);
3438   ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
3439                                /*Id=*/nullptr, Int32QTy,
3440                                ImplicitParamDecl::Other);
3441   WrapperArgs.emplace_back(&ParallelLevelArg);
3442   WrapperArgs.emplace_back(&WrapperArg);
3443 
3444   const CGFunctionInfo &CGFI =
3445       CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
3446 
3447   auto *Fn = llvm::Function::Create(
3448       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3449       Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
3450 
3451   // Ensure we do not inline the function. This is trivially true for the ones
3452   // passed to __kmpc_fork_call but the ones calles in serialized regions
3453   // could be inlined. This is not a perfect but it is closer to the invariant
3454   // we want, namely, every data environment starts with a new function.
3455   // TODO: We should pass the if condition to the runtime function and do the
3456   //       handling there. Much cleaner code.
3457   Fn->addFnAttr(llvm::Attribute::NoInline);
3458 
3459   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3460   Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
3461   Fn->setDoesNotRecurse();
3462 
3463   CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
3464   CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
3465                     D.getBeginLoc(), D.getBeginLoc());
3466 
3467   const auto *RD = CS.getCapturedRecordDecl();
3468   auto CurField = RD->field_begin();
3469 
3470   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
3471                                                       /*Name=*/".zero.addr");
3472   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
3473   // Get the array of arguments.
3474   SmallVector<llvm::Value *, 8> Args;
3475 
3476   Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
3477   Args.emplace_back(ZeroAddr.getPointer());
3478 
3479   CGBuilderTy &Bld = CGF.Builder;
3480   auto CI = CS.capture_begin();
3481 
3482   // Use global memory for data sharing.
3483   // Handle passing of global args to workers.
3484   Address GlobalArgs =
3485       CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
3486   llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
3487   llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
3488   CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
3489                           CGM.getModule(), OMPRTL___kmpc_get_shared_variables),
3490                       DataSharingArgs);
3491 
3492   // Retrieve the shared variables from the list of references returned
3493   // by the runtime. Pass the variables to the outlined function.
3494   Address SharedArgListAddress = Address::invalid();
3495   if (CS.capture_size() > 0 ||
3496       isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3497     SharedArgListAddress = CGF.EmitLoadOfPointer(
3498         GlobalArgs, CGF.getContext()
3499                         .getPointerType(CGF.getContext().getPointerType(
3500                             CGF.getContext().VoidPtrTy))
3501                         .castAs<PointerType>());
3502   }
3503   unsigned Idx = 0;
3504   if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
3505     Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3506     Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3507         Src, CGF.SizeTy->getPointerTo());
3508     llvm::Value *LB = CGF.EmitLoadOfScalar(
3509         TypedAddress,
3510         /*Volatile=*/false,
3511         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3512         cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
3513     Args.emplace_back(LB);
3514     ++Idx;
3515     Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
3516     TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3517         Src, CGF.SizeTy->getPointerTo());
3518     llvm::Value *UB = CGF.EmitLoadOfScalar(
3519         TypedAddress,
3520         /*Volatile=*/false,
3521         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
3522         cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
3523     Args.emplace_back(UB);
3524     ++Idx;
3525   }
3526   if (CS.capture_size() > 0) {
3527     ASTContext &CGFContext = CGF.getContext();
3528     for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
3529       QualType ElemTy = CurField->getType();
3530       Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
3531       Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
3532           Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
3533       llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
3534                                               /*Volatile=*/false,
3535                                               CGFContext.getPointerType(ElemTy),
3536                                               CI->getLocation());
3537       if (CI->capturesVariableByCopy() &&
3538           !CI->getCapturedVar()->getType()->isAnyPointerType()) {
3539         Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
3540                               CI->getLocation());
3541       }
3542       Args.emplace_back(Arg);
3543     }
3544   }
3545 
3546   emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
3547   CGF.FinishFunction();
3548   return Fn;
3549 }
3550 
3551 void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
3552                                               const Decl *D) {
3553   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3554     return;
3555 
3556   assert(D && "Expected function or captured|block decl.");
3557   assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
3558          "Function is registered already.");
3559   assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
3560          "Team is set but not processed.");
3561   const Stmt *Body = nullptr;
3562   bool NeedToDelayGlobalization = false;
3563   if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3564     Body = FD->getBody();
3565   } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
3566     Body = BD->getBody();
3567   } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
3568     Body = CD->getBody();
3569     NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
3570     if (NeedToDelayGlobalization &&
3571         getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
3572       return;
3573   }
3574   if (!Body)
3575     return;
3576   CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
3577   VarChecker.Visit(Body);
3578   const RecordDecl *GlobalizedVarsRecord =
3579       VarChecker.getGlobalizedRecord(IsInTTDRegion);
3580   TeamAndReductions.first = nullptr;
3581   TeamAndReductions.second.clear();
3582   ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
3583       VarChecker.getEscapedVariableLengthDecls();
3584   if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
3585     return;
3586   auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
3587   I->getSecond().MappedParams =
3588       std::make_unique<CodeGenFunction::OMPMapVars>();
3589   I->getSecond().EscapedParameters.insert(
3590       VarChecker.getEscapedParameters().begin(),
3591       VarChecker.getEscapedParameters().end());
3592   I->getSecond().EscapedVariableLengthDecls.append(
3593       EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
3594   DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
3595   for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3596     assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3597     Data.insert(std::make_pair(VD, MappedVarData()));
3598   }
3599   if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
3600     CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
3601     VarChecker.Visit(Body);
3602     I->getSecond().SecondaryLocalVarData.emplace();
3603     DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
3604     for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
3605       assert(VD->isCanonicalDecl() && "Expected canonical declaration");
3606       Data.insert(std::make_pair(VD, MappedVarData()));
3607     }
3608   }
3609   if (!NeedToDelayGlobalization) {
3610     emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
3611     struct GlobalizationScope final : EHScopeStack::Cleanup {
3612       GlobalizationScope() = default;
3613 
3614       void Emit(CodeGenFunction &CGF, Flags flags) override {
3615         static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
3616             .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
3617       }
3618     };
3619     CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
3620   }
3621 }
3622 
3623 Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
3624                                                         const VarDecl *VD) {
3625   if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
3626     const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3627     auto AS = LangAS::Default;
3628     switch (A->getAllocatorType()) {
3629       // Use the default allocator here as by default local vars are
3630       // threadlocal.
3631     case OMPAllocateDeclAttr::OMPNullMemAlloc:
3632     case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3633     case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3634     case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3635     case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3636       // Follow the user decision - use default allocation.
3637       return Address::invalid();
3638     case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3639       // TODO: implement aupport for user-defined allocators.
3640       return Address::invalid();
3641     case OMPAllocateDeclAttr::OMPConstMemAlloc:
3642       AS = LangAS::cuda_constant;
3643       break;
3644     case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3645       AS = LangAS::cuda_shared;
3646       break;
3647     case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3648     case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3649       break;
3650     }
3651     llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
3652     auto *GV = new llvm::GlobalVariable(
3653         CGM.getModule(), VarTy, /*isConstant=*/false,
3654         llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
3655         VD->getName(),
3656         /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
3657         CGM.getContext().getTargetAddressSpace(AS));
3658     CharUnits Align = CGM.getContext().getDeclAlign(VD);
3659     GV->setAlignment(Align.getAsAlign());
3660     return Address(
3661         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3662             GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
3663                     VD->getType().getAddressSpace()))),
3664         Align);
3665   }
3666 
3667   if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
3668     return Address::invalid();
3669 
3670   VD = VD->getCanonicalDecl();
3671   auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
3672   if (I == FunctionGlobalizedDecls.end())
3673     return Address::invalid();
3674   auto VDI = I->getSecond().LocalVarData.find(VD);
3675   if (VDI != I->getSecond().LocalVarData.end())
3676     return VDI->second.PrivateAddr;
3677   if (VD->hasAttrs()) {
3678     for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
3679          E(VD->attr_end());
3680          IT != E; ++IT) {
3681       auto VDI = I->getSecond().LocalVarData.find(
3682           cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
3683               ->getCanonicalDecl());
3684       if (VDI != I->getSecond().LocalVarData.end())
3685         return VDI->second.PrivateAddr;
3686     }
3687   }
3688 
3689   return Address::invalid();
3690 }
3691 
3692 void CGOpenMPRuntimeGPU::functionFinished(CodeGenFunction &CGF) {
3693   FunctionGlobalizedDecls.erase(CGF.CurFn);
3694   CGOpenMPRuntime::functionFinished(CGF);
3695 }
3696 
3697 void CGOpenMPRuntimeGPU::getDefaultDistScheduleAndChunk(
3698     CodeGenFunction &CGF, const OMPLoopDirective &S,
3699     OpenMPDistScheduleClauseKind &ScheduleKind,
3700     llvm::Value *&Chunk) const {
3701   auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
3702   if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD) {
3703     ScheduleKind = OMPC_DIST_SCHEDULE_static;
3704     Chunk = CGF.EmitScalarConversion(
3705         RT.getGPUNumThreads(CGF),
3706         CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3707         S.getIterationVariable()->getType(), S.getBeginLoc());
3708     return;
3709   }
3710   CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
3711       CGF, S, ScheduleKind, Chunk);
3712 }
3713 
3714 void CGOpenMPRuntimeGPU::getDefaultScheduleAndChunk(
3715     CodeGenFunction &CGF, const OMPLoopDirective &S,
3716     OpenMPScheduleClauseKind &ScheduleKind,
3717     const Expr *&ChunkExpr) const {
3718   ScheduleKind = OMPC_SCHEDULE_static;
3719   // Chunk size is 1 in this case.
3720   llvm::APInt ChunkSize(32, 1);
3721   ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
3722       CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
3723       SourceLocation());
3724 }
3725 
3726 void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
3727     CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
3728   assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
3729          " Expected target-based directive.");
3730   const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
3731   for (const CapturedStmt::Capture &C : CS->captures()) {
3732     // Capture variables captured by reference in lambdas for target-based
3733     // directives.
3734     if (!C.capturesVariable())
3735       continue;
3736     const VarDecl *VD = C.getCapturedVar();
3737     const auto *RD = VD->getType()
3738                          .getCanonicalType()
3739                          .getNonReferenceType()
3740                          ->getAsCXXRecordDecl();
3741     if (!RD || !RD->isLambda())
3742       continue;
3743     Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3744     LValue VDLVal;
3745     if (VD->getType().getCanonicalType()->isReferenceType())
3746       VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
3747     else
3748       VDLVal = CGF.MakeAddrLValue(
3749           VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
3750     llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
3751     FieldDecl *ThisCapture = nullptr;
3752     RD->getCaptureFields(Captures, ThisCapture);
3753     if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
3754       LValue ThisLVal =
3755           CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
3756       llvm::Value *CXXThis = CGF.LoadCXXThis();
3757       CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
3758     }
3759     for (const LambdaCapture &LC : RD->captures()) {
3760       if (LC.getCaptureKind() != LCK_ByRef)
3761         continue;
3762       const VarDecl *VD = LC.getCapturedVar();
3763       if (!CS->capturesVariable(VD))
3764         continue;
3765       auto It = Captures.find(VD);
3766       assert(It != Captures.end() && "Found lambda capture without field.");
3767       LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
3768       Address VDAddr = CGF.GetAddrOfLocalVar(VD);
3769       if (VD->getType().getCanonicalType()->isReferenceType())
3770         VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
3771                                                VD->getType().getCanonicalType())
3772                      .getAddress(CGF);
3773       CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
3774     }
3775   }
3776 }
3777 
3778 bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
3779                                                             LangAS &AS) {
3780   if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
3781     return false;
3782   const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
3783   switch(A->getAllocatorType()) {
3784   case OMPAllocateDeclAttr::OMPNullMemAlloc:
3785   case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
3786   // Not supported, fallback to the default mem space.
3787   case OMPAllocateDeclAttr::OMPThreadMemAlloc:
3788   case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
3789   case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
3790   case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
3791   case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
3792     AS = LangAS::Default;
3793     return true;
3794   case OMPAllocateDeclAttr::OMPConstMemAlloc:
3795     AS = LangAS::cuda_constant;
3796     return true;
3797   case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
3798     AS = LangAS::cuda_shared;
3799     return true;
3800   case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
3801     llvm_unreachable("Expected predefined allocator for the variables with the "
3802                      "static storage.");
3803   }
3804   return false;
3805 }
3806 
3807 // Get current CudaArch and ignore any unknown values
3808 static CudaArch getCudaArch(CodeGenModule &CGM) {
3809   if (!CGM.getTarget().hasFeature("ptx"))
3810     return CudaArch::UNKNOWN;
3811   for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
3812     if (Feature.getValue()) {
3813       CudaArch Arch = StringToCudaArch(Feature.getKey());
3814       if (Arch != CudaArch::UNKNOWN)
3815         return Arch;
3816     }
3817   }
3818   return CudaArch::UNKNOWN;
3819 }
3820 
3821 /// Check to see if target architecture supports unified addressing which is
3822 /// a restriction for OpenMP requires clause "unified_shared_memory".
3823 void CGOpenMPRuntimeGPU::processRequiresDirective(
3824     const OMPRequiresDecl *D) {
3825   for (const OMPClause *Clause : D->clauselists()) {
3826     if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
3827       CudaArch Arch = getCudaArch(CGM);
3828       switch (Arch) {
3829       case CudaArch::SM_20:
3830       case CudaArch::SM_21:
3831       case CudaArch::SM_30:
3832       case CudaArch::SM_32:
3833       case CudaArch::SM_35:
3834       case CudaArch::SM_37:
3835       case CudaArch::SM_50:
3836       case CudaArch::SM_52:
3837       case CudaArch::SM_53: {
3838         SmallString<256> Buffer;
3839         llvm::raw_svector_ostream Out(Buffer);
3840         Out << "Target architecture " << CudaArchToString(Arch)
3841             << " does not support unified addressing";
3842         CGM.Error(Clause->getBeginLoc(), Out.str());
3843         return;
3844       }
3845       case CudaArch::SM_60:
3846       case CudaArch::SM_61:
3847       case CudaArch::SM_62:
3848       case CudaArch::SM_70:
3849       case CudaArch::SM_72:
3850       case CudaArch::SM_75:
3851       case CudaArch::SM_80:
3852       case CudaArch::SM_86:
3853       case CudaArch::GFX600:
3854       case CudaArch::GFX601:
3855       case CudaArch::GFX602:
3856       case CudaArch::GFX700:
3857       case CudaArch::GFX701:
3858       case CudaArch::GFX702:
3859       case CudaArch::GFX703:
3860       case CudaArch::GFX704:
3861       case CudaArch::GFX705:
3862       case CudaArch::GFX801:
3863       case CudaArch::GFX802:
3864       case CudaArch::GFX803:
3865       case CudaArch::GFX805:
3866       case CudaArch::GFX810:
3867       case CudaArch::GFX900:
3868       case CudaArch::GFX902:
3869       case CudaArch::GFX904:
3870       case CudaArch::GFX906:
3871       case CudaArch::GFX908:
3872       case CudaArch::GFX909:
3873       case CudaArch::GFX90a:
3874       case CudaArch::GFX90c:
3875       case CudaArch::GFX1010:
3876       case CudaArch::GFX1011:
3877       case CudaArch::GFX1012:
3878       case CudaArch::GFX1013:
3879       case CudaArch::GFX1030:
3880       case CudaArch::GFX1031:
3881       case CudaArch::GFX1032:
3882       case CudaArch::GFX1033:
3883       case CudaArch::GFX1034:
3884       case CudaArch::GFX1035:
3885       case CudaArch::UNUSED:
3886       case CudaArch::UNKNOWN:
3887         break;
3888       case CudaArch::LAST:
3889         llvm_unreachable("Unexpected Cuda arch.");
3890       }
3891     }
3892   }
3893   CGOpenMPRuntime::processRequiresDirective(D);
3894 }
3895 
3896 void CGOpenMPRuntimeGPU::clear() {
3897 
3898   if (!TeamsReductions.empty()) {
3899     ASTContext &C = CGM.getContext();
3900     RecordDecl *StaticRD = C.buildImplicitRecord(
3901         "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
3902     StaticRD->startDefinition();
3903     for (const RecordDecl *TeamReductionRec : TeamsReductions) {
3904       QualType RecTy = C.getRecordType(TeamReductionRec);
3905       auto *Field = FieldDecl::Create(
3906           C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
3907           C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
3908           /*BW=*/nullptr, /*Mutable=*/false,
3909           /*InitStyle=*/ICIS_NoInit);
3910       Field->setAccess(AS_public);
3911       StaticRD->addDecl(Field);
3912     }
3913     StaticRD->completeDefinition();
3914     QualType StaticTy = C.getRecordType(StaticRD);
3915     llvm::Type *LLVMReductionsBufferTy =
3916         CGM.getTypes().ConvertTypeForMem(StaticTy);
3917     // FIXME: nvlink does not handle weak linkage correctly (object with the
3918     // different size are reported as erroneous).
3919     // Restore CommonLinkage as soon as nvlink is fixed.
3920     auto *GV = new llvm::GlobalVariable(
3921         CGM.getModule(), LLVMReductionsBufferTy,
3922         /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
3923         llvm::Constant::getNullValue(LLVMReductionsBufferTy),
3924         "_openmp_teams_reductions_buffer_$_");
3925     KernelTeamsReductionPtr->setInitializer(
3926         llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
3927                                                              CGM.VoidPtrTy));
3928   }
3929   CGOpenMPRuntime::clear();
3930 }
3931