1 //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides a class for OpenMP runtime code generation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGOpenMPRuntime.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenFunction.h"
19 #include "clang/CodeGen/ConstantInitBuilder.h"
20 #include "clang/AST/Decl.h"
21 #include "clang/AST/StmtOpenMP.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/BitmaskEnum.h"
24 #include "llvm/Bitcode/BitcodeReader.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/Format.h"
30 #include "llvm/Support/raw_ostream.h"
31 #include <cassert>
32 
33 using namespace clang;
34 using namespace CodeGen;
35 
36 namespace {
37 /// Base class for handling code generation inside OpenMP regions.
38 class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
39 public:
40   /// Kinds of OpenMP regions used in codegen.
41   enum CGOpenMPRegionKind {
42     /// Region with outlined function for standalone 'parallel'
43     /// directive.
44     ParallelOutlinedRegion,
45     /// Region with outlined function for standalone 'task' directive.
46     TaskOutlinedRegion,
47     /// Region for constructs that do not require function outlining,
48     /// like 'for', 'sections', 'atomic' etc. directives.
49     InlinedRegion,
50     /// Region with outlined function for standalone 'target' directive.
51     TargetRegion,
52   };
53 
54   CGOpenMPRegionInfo(const CapturedStmt &CS,
55                      const CGOpenMPRegionKind RegionKind,
56                      const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
57                      bool HasCancel)
58       : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
59         CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
60 
61   CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
62                      const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
63                      bool HasCancel)
64       : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
65         Kind(Kind), HasCancel(HasCancel) {}
66 
67   /// Get a variable or parameter for storing global thread id
68   /// inside OpenMP construct.
69   virtual const VarDecl *getThreadIDVariable() const = 0;
70 
71   /// Emit the captured statement body.
72   void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
73 
74   /// Get an LValue for the current ThreadID variable.
75   /// \return LValue for thread id variable. This LValue always has type int32*.
76   virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
77 
78   virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}
79 
80   CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
81 
82   OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
83 
84   bool hasCancel() const { return HasCancel; }
85 
86   static bool classof(const CGCapturedStmtInfo *Info) {
87     return Info->getKind() == CR_OpenMP;
88   }
89 
90   ~CGOpenMPRegionInfo() override = default;
91 
92 protected:
93   CGOpenMPRegionKind RegionKind;
94   RegionCodeGenTy CodeGen;
95   OpenMPDirectiveKind Kind;
96   bool HasCancel;
97 };
98 
99 /// API for captured statement code generation in OpenMP constructs.
100 class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
101 public:
102   CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
103                              const RegionCodeGenTy &CodeGen,
104                              OpenMPDirectiveKind Kind, bool HasCancel,
105                              StringRef HelperName)
106       : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
107                            HasCancel),
108         ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
109     assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
110   }
111 
112   /// Get a variable or parameter for storing global thread id
113   /// inside OpenMP construct.
114   const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
115 
116   /// Get the name of the capture helper.
117   StringRef getHelperName() const override { return HelperName; }
118 
119   static bool classof(const CGCapturedStmtInfo *Info) {
120     return CGOpenMPRegionInfo::classof(Info) &&
121            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
122                ParallelOutlinedRegion;
123   }
124 
125 private:
126   /// A variable or parameter storing global thread id for OpenMP
127   /// constructs.
128   const VarDecl *ThreadIDVar;
129   StringRef HelperName;
130 };
131 
132 /// API for captured statement code generation in OpenMP constructs.
133 class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
134 public:
135   class UntiedTaskActionTy final : public PrePostActionTy {
136     bool Untied;
137     const VarDecl *PartIDVar;
138     const RegionCodeGenTy UntiedCodeGen;
139     llvm::SwitchInst *UntiedSwitch = nullptr;
140 
141   public:
142     UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
143                        const RegionCodeGenTy &UntiedCodeGen)
144         : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
145     void Enter(CodeGenFunction &CGF) override {
146       if (Untied) {
147         // Emit task switching point.
148         LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
149             CGF.GetAddrOfLocalVar(PartIDVar),
150             PartIDVar->getType()->castAs<PointerType>());
151         llvm::Value *Res =
152             CGF.EmitLoadOfScalar(PartIdLVal, PartIDVar->getLocation());
153         llvm::BasicBlock *DoneBB = CGF.createBasicBlock(".untied.done.");
154         UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
155         CGF.EmitBlock(DoneBB);
156         CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
157         CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
158         UntiedSwitch->addCase(CGF.Builder.getInt32(0),
159                               CGF.Builder.GetInsertBlock());
160         emitUntiedSwitch(CGF);
161       }
162     }
163     void emitUntiedSwitch(CodeGenFunction &CGF) const {
164       if (Untied) {
165         LValue PartIdLVal = CGF.EmitLoadOfPointerLValue(
166             CGF.GetAddrOfLocalVar(PartIDVar),
167             PartIDVar->getType()->castAs<PointerType>());
168         CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
169                               PartIdLVal);
170         UntiedCodeGen(CGF);
171         CodeGenFunction::JumpDest CurPoint =
172             CGF.getJumpDestInCurrentScope(".untied.next.");
173         CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
174         CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
175         UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
176                               CGF.Builder.GetInsertBlock());
177         CGF.EmitBranchThroughCleanup(CurPoint);
178         CGF.EmitBlock(CurPoint.getBlock());
179       }
180     }
181     unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
182   };
183   CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
184                                  const VarDecl *ThreadIDVar,
185                                  const RegionCodeGenTy &CodeGen,
186                                  OpenMPDirectiveKind Kind, bool HasCancel,
187                                  const UntiedTaskActionTy &Action)
188       : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
189         ThreadIDVar(ThreadIDVar), Action(Action) {
190     assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
191   }
192 
193   /// Get a variable or parameter for storing global thread id
194   /// inside OpenMP construct.
195   const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
196 
197   /// Get an LValue for the current ThreadID variable.
198   LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
199 
200   /// Get the name of the capture helper.
201   StringRef getHelperName() const override { return ".omp_outlined."; }
202 
203   void emitUntiedSwitch(CodeGenFunction &CGF) override {
204     Action.emitUntiedSwitch(CGF);
205   }
206 
207   static bool classof(const CGCapturedStmtInfo *Info) {
208     return CGOpenMPRegionInfo::classof(Info) &&
209            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
210                TaskOutlinedRegion;
211   }
212 
213 private:
214   /// A variable or parameter storing global thread id for OpenMP
215   /// constructs.
216   const VarDecl *ThreadIDVar;
217   /// Action for emitting code for untied tasks.
218   const UntiedTaskActionTy &Action;
219 };
220 
221 /// API for inlined captured statement code generation in OpenMP
222 /// constructs.
223 class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
224 public:
225   CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
226                             const RegionCodeGenTy &CodeGen,
227                             OpenMPDirectiveKind Kind, bool HasCancel)
228       : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
229         OldCSI(OldCSI),
230         OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
231 
  /// Retrieve the value of the context parameter.
233   llvm::Value *getContextValue() const override {
234     if (OuterRegionInfo)
235       return OuterRegionInfo->getContextValue();
236     llvm_unreachable("No context value for inlined OpenMP region");
237   }
238 
239   void setContextValue(llvm::Value *V) override {
240     if (OuterRegionInfo) {
241       OuterRegionInfo->setContextValue(V);
242       return;
243     }
244     llvm_unreachable("No context value for inlined OpenMP region");
245   }
246 
247   /// Lookup the captured field decl for a variable.
248   const FieldDecl *lookup(const VarDecl *VD) const override {
249     if (OuterRegionInfo)
250       return OuterRegionInfo->lookup(VD);
    // If there is no outer outlined region, there is no need to look the
    // variable up in the list of captured variables; we can use the original
    // one.
253     return nullptr;
254   }
255 
256   FieldDecl *getThisFieldDecl() const override {
257     if (OuterRegionInfo)
258       return OuterRegionInfo->getThisFieldDecl();
259     return nullptr;
260   }
261 
262   /// Get a variable or parameter for storing global thread id
263   /// inside OpenMP construct.
264   const VarDecl *getThreadIDVariable() const override {
265     if (OuterRegionInfo)
266       return OuterRegionInfo->getThreadIDVariable();
267     return nullptr;
268   }
269 
270   /// Get an LValue for the current ThreadID variable.
271   LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
272     if (OuterRegionInfo)
273       return OuterRegionInfo->getThreadIDVariableLValue(CGF);
274     llvm_unreachable("No LValue for inlined OpenMP construct");
275   }
276 
277   /// Get the name of the capture helper.
278   StringRef getHelperName() const override {
279     if (auto *OuterRegionInfo = getOldCSI())
280       return OuterRegionInfo->getHelperName();
281     llvm_unreachable("No helper name for inlined OpenMP construct");
282   }
283 
284   void emitUntiedSwitch(CodeGenFunction &CGF) override {
285     if (OuterRegionInfo)
286       OuterRegionInfo->emitUntiedSwitch(CGF);
287   }
288 
289   CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
290 
291   static bool classof(const CGCapturedStmtInfo *Info) {
292     return CGOpenMPRegionInfo::classof(Info) &&
293            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
294   }
295 
296   ~CGOpenMPInlinedRegionInfo() override = default;
297 
298 private:
299   /// CodeGen info about outer OpenMP region.
300   CodeGenFunction::CGCapturedStmtInfo *OldCSI;
301   CGOpenMPRegionInfo *OuterRegionInfo;
302 };
303 
/// API for captured statement code generation in OpenMP target
/// constructs. For these captures, implicit parameters are used instead of the
/// captured fields. The name of the target region has to be unique in a given
/// application, so it is provided by the client, because only the client has
/// the information to generate it.
309 class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
310 public:
311   CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
312                            const RegionCodeGenTy &CodeGen, StringRef HelperName)
313       : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
314                            /*HasCancel=*/false),
315         HelperName(HelperName) {}
316 
317   /// This is unused for target regions because each starts executing
318   /// with a single thread.
319   const VarDecl *getThreadIDVariable() const override { return nullptr; }
320 
321   /// Get the name of the capture helper.
322   StringRef getHelperName() const override { return HelperName; }
323 
324   static bool classof(const CGCapturedStmtInfo *Info) {
325     return CGOpenMPRegionInfo::classof(Info) &&
326            cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
327   }
328 
329 private:
330   StringRef HelperName;
331 };
332 
333 static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
334   llvm_unreachable("No codegen for expressions");
335 }
/// API for generation of expressions captured in an innermost OpenMP
/// region.
338 class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
339 public:
340   CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
341       : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
342                                   OMPD_unknown,
343                                   /*HasCancel=*/false),
344         PrivScope(CGF) {
345     // Make sure the globals captured in the provided statement are local by
346     // using the privatization logic. We assume the same variable is not
347     // captured more than once.
348     for (const auto &C : CS.captures()) {
349       if (!C.capturesVariable() && !C.capturesVariableByCopy())
350         continue;
351 
352       const VarDecl *VD = C.getCapturedVar();
353       if (VD->isLocalVarDeclOrParm())
354         continue;
355 
356       DeclRefExpr DRE(const_cast<VarDecl *>(VD),
357                       /*RefersToEnclosingVariableOrCapture=*/false,
358                       VD->getType().getNonReferenceType(), VK_LValue,
359                       C.getLocation());
360       PrivScope.addPrivate(
361           VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); });
362     }
363     (void)PrivScope.Privatize();
364   }
365 
366   /// Lookup the captured field decl for a variable.
367   const FieldDecl *lookup(const VarDecl *VD) const override {
368     if (const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
369       return FD;
370     return nullptr;
371   }
372 
373   /// Emit the captured statement body.
374   void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
375     llvm_unreachable("No body for expressions");
376   }
377 
378   /// Get a variable or parameter for storing global thread id
379   /// inside OpenMP construct.
380   const VarDecl *getThreadIDVariable() const override {
381     llvm_unreachable("No thread id for expressions");
382   }
383 
384   /// Get the name of the capture helper.
385   StringRef getHelperName() const override {
386     llvm_unreachable("No helper name for expressions");
387   }
388 
389   static bool classof(const CGCapturedStmtInfo *Info) { return false; }
390 
391 private:
392   /// Private scope to capture global variables.
393   CodeGenFunction::OMPPrivateScope PrivScope;
394 };
395 
396 /// RAII for emitting code of OpenMP constructs.
397 class InlinedOpenMPRegionRAII {
398   CodeGenFunction &CGF;
399   llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
400   FieldDecl *LambdaThisCaptureField = nullptr;
401   const CodeGen::CGBlockInfo *BlockInfo = nullptr;
402 
403 public:
404   /// Constructs region for combined constructs.
405   /// \param CodeGen Code generation sequence for combined directives. Includes
406   /// a list of functions used for code generation of implicitly inlined
407   /// regions.
408   InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
409                           OpenMPDirectiveKind Kind, bool HasCancel)
410       : CGF(CGF) {
411     // Start emission for the construct.
412     CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
413         CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
414     std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
415     LambdaThisCaptureField = CGF.LambdaThisCaptureField;
416     CGF.LambdaThisCaptureField = nullptr;
417     BlockInfo = CGF.BlockInfo;
418     CGF.BlockInfo = nullptr;
419   }
420 
421   ~InlinedOpenMPRegionRAII() {
422     // Restore original CapturedStmtInfo only if we're done with code emission.
423     auto *OldCSI =
424         cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
425     delete CGF.CapturedStmtInfo;
426     CGF.CapturedStmtInfo = OldCSI;
427     std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
428     CGF.LambdaThisCaptureField = LambdaThisCaptureField;
429     CGF.BlockInfo = BlockInfo;
430   }
431 };
432 
433 /// Values for bit flags used in the ident_t to describe the fields.
/// All enum elements are named and described in accordance with the code
435 /// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
436 enum OpenMPLocationFlags : unsigned {
437   /// Use trampoline for internal microtask.
438   OMP_IDENT_IMD = 0x01,
439   /// Use c-style ident structure.
440   OMP_IDENT_KMPC = 0x02,
441   /// Atomic reduction option for kmpc_reduce.
442   OMP_ATOMIC_REDUCE = 0x10,
443   /// Explicit 'barrier' directive.
444   OMP_IDENT_BARRIER_EXPL = 0x20,
445   /// Implicit barrier in code.
446   OMP_IDENT_BARRIER_IMPL = 0x40,
447   /// Implicit barrier in 'for' directive.
448   OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
449   /// Implicit barrier in 'sections' directive.
450   OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
451   /// Implicit barrier in 'single' directive.
452   OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
453   /// Call of __kmp_for_static_init for static loop.
454   OMP_IDENT_WORK_LOOP = 0x200,
455   /// Call of __kmp_for_static_init for sections.
456   OMP_IDENT_WORK_SECTIONS = 0x400,
457   /// Call of __kmp_for_static_init for distribute.
458   OMP_IDENT_WORK_DISTRIBUTE = 0x800,
459   LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
460 };
461 
/// Describes the ident_t structure that encodes a source location.
463 /// All descriptions are taken from
464 /// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
465 /// Original structure:
466 /// typedef struct ident {
467 ///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
468 ///                                  see above  */
469 ///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
470 ///                                  KMP_IDENT_KMPC identifies this union
471 ///                                  member  */
472 ///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
473 ///                                  see above */
474 ///#if USE_ITT_BUILD
475 ///                            /*  but currently used for storing
476 ///                                region-specific ITT */
477 ///                            /*  contextual information. */
478 ///#endif /* USE_ITT_BUILD */
479 ///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
480 ///                                 C++  */
481 ///    char const *psource;    /**< String describing the source location.
482 ///                            The string is composed of semi-colon separated
483 //                             fields which describe the source file,
484 ///                            the function and a pair of line numbers that
485 ///                            delimit the construct.
486 ///                             */
487 /// } ident_t;
488 enum IdentFieldIndex {
489   /// might be used in Fortran
490   IdentField_Reserved_1,
491   /// OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
492   IdentField_Flags,
493   /// Not really used in Fortran any more
494   IdentField_Reserved_2,
495   /// Source[4] in Fortran, do not use for C++
496   IdentField_Reserved_3,
497   /// String describing the source location. The string is composed of
498   /// semi-colon separated fields which describe the source file, the function
499   /// and a pair of line numbers that delimit the construct.
500   IdentField_PSource
501 };
502 
503 /// Schedule types for 'omp for' loops (these enumerators are taken from
504 /// the enum sched_type in kmp.h).
505 enum OpenMPSchedType {
506   /// Lower bound for default (unordered) versions.
507   OMP_sch_lower = 32,
508   OMP_sch_static_chunked = 33,
509   OMP_sch_static = 34,
510   OMP_sch_dynamic_chunked = 35,
511   OMP_sch_guided_chunked = 36,
512   OMP_sch_runtime = 37,
513   OMP_sch_auto = 38,
514   /// static with chunk adjustment (e.g., simd)
515   OMP_sch_static_balanced_chunked = 45,
516   /// Lower bound for 'ordered' versions.
517   OMP_ord_lower = 64,
518   OMP_ord_static_chunked = 65,
519   OMP_ord_static = 66,
520   OMP_ord_dynamic_chunked = 67,
521   OMP_ord_guided_chunked = 68,
522   OMP_ord_runtime = 69,
523   OMP_ord_auto = 70,
524   OMP_sch_default = OMP_sch_static,
525   /// dist_schedule types
526   OMP_dist_sch_static_chunked = 91,
527   OMP_dist_sch_static = 92,
528   /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
529   /// Set if the monotonic schedule modifier was present.
530   OMP_sch_modifier_monotonic = (1 << 29),
531   /// Set if the nonmonotonic schedule modifier was present.
532   OMP_sch_modifier_nonmonotonic = (1 << 30),
533 };
534 
535 enum OpenMPRTLFunction {
536   /// Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
537   /// kmpc_micro microtask, ...);
538   OMPRTL__kmpc_fork_call,
539   /// Call to void *__kmpc_threadprivate_cached(ident_t *loc,
540   /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
541   OMPRTL__kmpc_threadprivate_cached,
542   /// Call to void __kmpc_threadprivate_register( ident_t *,
543   /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
544   OMPRTL__kmpc_threadprivate_register,
  // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
546   OMPRTL__kmpc_global_thread_num,
547   // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
548   // kmp_critical_name *crit);
549   OMPRTL__kmpc_critical,
550   // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
551   // global_tid, kmp_critical_name *crit, uintptr_t hint);
552   OMPRTL__kmpc_critical_with_hint,
553   // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
554   // kmp_critical_name *crit);
555   OMPRTL__kmpc_end_critical,
556   // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
557   // global_tid);
558   OMPRTL__kmpc_cancel_barrier,
559   // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
560   OMPRTL__kmpc_barrier,
561   // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
562   OMPRTL__kmpc_for_static_fini,
563   // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
564   // global_tid);
565   OMPRTL__kmpc_serialized_parallel,
566   // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
567   // global_tid);
568   OMPRTL__kmpc_end_serialized_parallel,
569   // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
570   // kmp_int32 num_threads);
571   OMPRTL__kmpc_push_num_threads,
572   // Call to void __kmpc_flush(ident_t *loc);
573   OMPRTL__kmpc_flush,
574   // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
575   OMPRTL__kmpc_master,
576   // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
577   OMPRTL__kmpc_end_master,
578   // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
579   // int end_part);
580   OMPRTL__kmpc_omp_taskyield,
581   // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
582   OMPRTL__kmpc_single,
583   // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
584   OMPRTL__kmpc_end_single,
585   // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
586   // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
587   // kmp_routine_entry_t *task_entry);
588   OMPRTL__kmpc_omp_task_alloc,
589   // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
590   // new_task);
591   OMPRTL__kmpc_omp_task,
592   // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
593   // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
594   // kmp_int32 didit);
595   OMPRTL__kmpc_copyprivate,
596   // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
597   // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
598   // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
599   OMPRTL__kmpc_reduce,
600   // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
601   // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
602   // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
603   // *lck);
604   OMPRTL__kmpc_reduce_nowait,
605   // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
606   // kmp_critical_name *lck);
607   OMPRTL__kmpc_end_reduce,
608   // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
609   // kmp_critical_name *lck);
610   OMPRTL__kmpc_end_reduce_nowait,
611   // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
612   // kmp_task_t * new_task);
613   OMPRTL__kmpc_omp_task_begin_if0,
614   // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
615   // kmp_task_t * new_task);
616   OMPRTL__kmpc_omp_task_complete_if0,
617   // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
618   OMPRTL__kmpc_ordered,
619   // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
620   OMPRTL__kmpc_end_ordered,
621   // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
622   // global_tid);
623   OMPRTL__kmpc_omp_taskwait,
624   // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
625   OMPRTL__kmpc_taskgroup,
626   // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
627   OMPRTL__kmpc_end_taskgroup,
628   // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
629   // int proc_bind);
630   OMPRTL__kmpc_push_proc_bind,
631   // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
632   // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
633   // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
634   OMPRTL__kmpc_omp_task_with_deps,
635   // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
636   // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
637   // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
638   OMPRTL__kmpc_omp_wait_deps,
639   // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
640   // global_tid, kmp_int32 cncl_kind);
641   OMPRTL__kmpc_cancellationpoint,
642   // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
643   // kmp_int32 cncl_kind);
644   OMPRTL__kmpc_cancel,
645   // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
646   // kmp_int32 num_teams, kmp_int32 thread_limit);
647   OMPRTL__kmpc_push_num_teams,
648   // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
649   // microtask, ...);
650   OMPRTL__kmpc_fork_teams,
651   // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
652   // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
653   // sched, kmp_uint64 grainsize, void *task_dup);
654   OMPRTL__kmpc_taskloop,
655   // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
656   // num_dims, struct kmp_dim *dims);
657   OMPRTL__kmpc_doacross_init,
658   // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
659   OMPRTL__kmpc_doacross_fini,
660   // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
661   // *vec);
662   OMPRTL__kmpc_doacross_post,
663   // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
664   // *vec);
665   OMPRTL__kmpc_doacross_wait,
666   // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
667   // *data);
668   OMPRTL__kmpc_task_reduction_init,
669   // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
670   // *d);
671   OMPRTL__kmpc_task_reduction_get_th_data,
672 
673   //
674   // Offloading related calls
675   //
676   // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
677   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
678   // *arg_types);
679   OMPRTL__tgt_target,
680   // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
681   // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
682   // *arg_types);
683   OMPRTL__tgt_target_nowait,
684   // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
685   // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
686   // *arg_types, int32_t num_teams, int32_t thread_limit);
687   OMPRTL__tgt_target_teams,
688   // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
689   // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
690   // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
691   OMPRTL__tgt_target_teams_nowait,
692   // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
693   OMPRTL__tgt_register_lib,
694   // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
695   OMPRTL__tgt_unregister_lib,
696   // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
697   // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
698   OMPRTL__tgt_target_data_begin,
699   // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
700   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
701   // *arg_types);
702   OMPRTL__tgt_target_data_begin_nowait,
703   // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
704   // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
705   OMPRTL__tgt_target_data_end,
706   // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
707   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
708   // *arg_types);
709   OMPRTL__tgt_target_data_end_nowait,
710   // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
711   // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
712   OMPRTL__tgt_target_data_update,
713   // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
714   // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
715   // *arg_types);
716   OMPRTL__tgt_target_data_update_nowait,
717 };
718 
/// A basic class for a pre- or post-action used in advanced codegen sequences
/// for OpenMP regions.
721 class CleanupTy final : public EHScopeStack::Cleanup {
722   PrePostActionTy *Action;
723 
724 public:
725   explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
726   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
727     if (!CGF.HaveInsertPoint())
728       return;
729     Action->Exit(CGF);
730   }
731 };
732 
733 } // anonymous namespace
734 
735 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const {
736   CodeGenFunction::RunCleanupsScope Scope(CGF);
737   if (PrePostAction) {
738     CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction);
739     Callback(CodeGen, CGF, *PrePostAction);
740   } else {
741     PrePostActionTy Action;
742     Callback(CodeGen, CGF, Action);
743   }
744 }
745 
/// Check if the combiner is a call to a UDR combiner and, if so, return the
/// UDR decl used for the reduction.
748 static const OMPDeclareReductionDecl *
749 getReductionInit(const Expr *ReductionOp) {
750   if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
751     if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
752       if (const auto *DRE =
753               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
754         if (const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
755           return DRD;
756   return nullptr;
757 }
758 
759 static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
760                                              const OMPDeclareReductionDecl *DRD,
761                                              const Expr *InitOp,
762                                              Address Private, Address Original,
763                                              QualType Ty) {
764   if (DRD->getInitializer()) {
765     std::pair<llvm::Function *, llvm::Function *> Reduction =
766         CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
767     const auto *CE = cast<CallExpr>(InitOp);
768     const auto *OVE = cast<OpaqueValueExpr>(CE->getCallee());
769     const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts();
770     const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts();
771     const auto *LHSDRE =
772         cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr());
773     const auto *RHSDRE =
774         cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
775     CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
776     PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
777                             [=]() { return Private; });
778     PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
779                             [=]() { return Original; });
780     (void)PrivateScope.Privatize();
781     RValue Func = RValue::get(Reduction.second);
782     CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
783     CGF.EmitIgnoredExpr(InitOp);
784   } else {
785     llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty);
786     std::string Name = CGF.CGM.getOpenMPRuntime().getName({"init"});
787     auto *GV = new llvm::GlobalVariable(
788         CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
789         llvm::GlobalValue::PrivateLinkage, Init, Name);
790     LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty);
791     RValue InitRVal;
792     switch (CGF.getEvaluationKind(Ty)) {
793     case TEK_Scalar:
794       InitRVal = CGF.EmitLoadOfLValue(LV, DRD->getLocation());
795       break;
796     case TEK_Complex:
797       InitRVal =
798           RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
799       break;
800     case TEK_Aggregate:
801       InitRVal = RValue::getAggregate(LV.getAddress());
802       break;
803     }
804     OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
805     CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
806     CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
807                          /*IsInitializer=*/false);
808   }
809 }
810 
/// Emit initialization of arrays of complex types.
/// \param DestAddr Address of the array.
/// \param Type Type of the array.
/// \param EmitDeclareReductionInit true if the initializer comes from a
/// 'declare reduction' construct, false if the private copy's own initializer
/// should be used.
/// \param Init Initializer expression for each element.
/// \param DRD 'declare reduction' construct whose initializer is used, if any.
/// \param SrcAddr Address of the original (shared) array.
816 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
817                                  QualType Type, bool EmitDeclareReductionInit,
818                                  const Expr *Init,
819                                  const OMPDeclareReductionDecl *DRD,
820                                  Address SrcAddr = Address::invalid()) {
821   // Perform element-by-element initialization.
822   QualType ElementTy;
823 
824   // Drill down to the base element type on both arrays.
825   const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
826   llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
827   DestAddr =
828       CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
829   if (DRD)
830     SrcAddr =
831         CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
832 
833   llvm::Value *SrcBegin = nullptr;
834   if (DRD)
835     SrcBegin = SrcAddr.getPointer();
836   llvm::Value *DestBegin = DestAddr.getPointer();
837   // Cast from pointer to array type to pointer to single element.
838   llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
839   // The basic structure here is a while-do loop.
840   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
841   llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
842   llvm::Value *IsEmpty =
843       CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
844   CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
845 
846   // Enter the loop body, making that address the current address.
847   llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
848   CGF.EmitBlock(BodyBB);
849 
850   CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
851 
852   llvm::PHINode *SrcElementPHI = nullptr;
853   Address SrcElementCurrent = Address::invalid();
854   if (DRD) {
855     SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
856                                           "omp.arraycpy.srcElementPast");
857     SrcElementPHI->addIncoming(SrcBegin, EntryBB);
858     SrcElementCurrent =
859         Address(SrcElementPHI,
860                 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
861   }
862   llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
863       DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
864   DestElementPHI->addIncoming(DestBegin, EntryBB);
865   Address DestElementCurrent =
866       Address(DestElementPHI,
867               DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
868 
869   // Emit copy.
870   {
871     CodeGenFunction::RunCleanupsScope InitScope(CGF);
872     if (EmitDeclareReductionInit) {
873       emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
874                                        SrcElementCurrent, ElementTy);
875     } else
876       CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
877                            /*IsInitializer=*/false);
878   }
879 
880   if (DRD) {
881     // Shift the address forward by one element.
    llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
884     SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
885   }
886 
887   // Shift the address forward by one element.
888   llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
889       DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
890   // Check whether we've reached the end.
891   llvm::Value *Done =
892       CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
893   CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
894   DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());
895 
896   // Done.
897   CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
898 }
899 
900 static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
901 isDeclareTargetDeclaration(const ValueDecl *VD) {
902   for (const Decl *D : VD->redecls()) {
903     if (!D->hasAttrs())
904       continue;
905     if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
906       return Attr->getMapType();
907   }
908   if (const auto *V = dyn_cast<VarDecl>(VD)) {
909     if (const VarDecl *TD = V->getTemplateInstantiationPattern())
910       return isDeclareTargetDeclaration(TD);
911   } else if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
912     if (const auto *TD = FD->getTemplateInstantiationPattern())
913       return isDeclareTargetDeclaration(TD);
914   }
915 
916   return llvm::None;
917 }
918 
919 LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
920   return CGF.EmitOMPSharedLValue(E);
921 }
922 
923 LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
924                                             const Expr *E) {
925   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
926     return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
927   return LValue();
928 }
929 
930 void ReductionCodeGen::emitAggregateInitialization(
931     CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
932     const OMPDeclareReductionDecl *DRD) {
933   // Emit VarDecl with copy init for arrays.
934   // Get the address of the original variable captured in current
935   // captured region.
936   const auto *PrivateVD =
937       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
938   bool EmitDeclareReductionInit =
939       DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
940   EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
941                        EmitDeclareReductionInit,
942                        EmitDeclareReductionInit ? ClausesData[N].ReductionOp
943                                                 : PrivateVD->getInit(),
944                        DRD, SharedLVal.getAddress());
945 }
946 
947 ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
948                                    ArrayRef<const Expr *> Privates,
949                                    ArrayRef<const Expr *> ReductionOps) {
950   ClausesData.reserve(Shareds.size());
951   SharedAddresses.reserve(Shareds.size());
952   Sizes.reserve(Shareds.size());
953   BaseDecls.reserve(Shareds.size());
954   auto IPriv = Privates.begin();
955   auto IRed = ReductionOps.begin();
956   for (const Expr *Ref : Shareds) {
957     ClausesData.emplace_back(Ref, *IPriv, *IRed);
958     std::advance(IPriv, 1);
959     std::advance(IRed, 1);
960   }
961 }
962 
963 void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
964   assert(SharedAddresses.size() == N &&
965          "Number of generated lvalues must be exactly N.");
966   LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
967   LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
968   SharedAddresses.emplace_back(First, Second);
969 }
970 
971 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
972   const auto *PrivateVD =
973       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
974   QualType PrivateType = PrivateVD->getType();
975   bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
976   if (!PrivateType->isVariablyModifiedType()) {
977     Sizes.emplace_back(
978         CGF.getTypeSize(
979             SharedAddresses[N].first.getType().getNonReferenceType()),
980         nullptr);
981     return;
982   }
983   llvm::Value *Size;
984   llvm::Value *SizeInChars;
985   auto *ElemType =
986       cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
987           ->getElementType();
988   auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
989   if (AsArraySection) {
990     Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
991                                      SharedAddresses[N].first.getPointer());
992     Size = CGF.Builder.CreateNUWAdd(
993         Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
994     SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
995   } else {
996     SizeInChars = CGF.getTypeSize(
997         SharedAddresses[N].first.getType().getNonReferenceType());
998     Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
999   }
1000   Sizes.emplace_back(SizeInChars, Size);
1001   CodeGenFunction::OpaqueValueMapping OpaqueMap(
1002       CGF,
1003       cast<OpaqueValueExpr>(
1004           CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1005       RValue::get(Size));
1006   CGF.EmitVariablyModifiedType(PrivateType);
1007 }
1008 
1009 void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
1010                                          llvm::Value *Size) {
1011   const auto *PrivateVD =
1012       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1013   QualType PrivateType = PrivateVD->getType();
1014   if (!PrivateType->isVariablyModifiedType()) {
1015     assert(!Size && !Sizes[N].second &&
1016            "Size should be nullptr for non-variably modified reduction "
1017            "items.");
1018     return;
1019   }
1020   CodeGenFunction::OpaqueValueMapping OpaqueMap(
1021       CGF,
1022       cast<OpaqueValueExpr>(
1023           CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
1024       RValue::get(Size));
1025   CGF.EmitVariablyModifiedType(PrivateType);
1026 }
1027 
1028 void ReductionCodeGen::emitInitialization(
1029     CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
1030     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
1031   assert(SharedAddresses.size() > N && "No variable was generated");
1032   const auto *PrivateVD =
1033       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1034   const OMPDeclareReductionDecl *DRD =
1035       getReductionInit(ClausesData[N].ReductionOp);
1036   QualType PrivateType = PrivateVD->getType();
1037   PrivateAddr = CGF.Builder.CreateElementBitCast(
1038       PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1039   QualType SharedType = SharedAddresses[N].first.getType();
1040   SharedLVal = CGF.MakeAddrLValue(
1041       CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
1042                                        CGF.ConvertTypeForMem(SharedType)),
1043       SharedType, SharedAddresses[N].first.getBaseInfo(),
1044       CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
1045   if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
1046     emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
1047   } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
1048     emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
1049                                      PrivateAddr, SharedLVal.getAddress(),
1050                                      SharedLVal.getType());
1051   } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
1052              !CGF.isTrivialInitializer(PrivateVD->getInit())) {
1053     CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
1054                          PrivateVD->getType().getQualifiers(),
1055                          /*IsInitializer=*/false);
1056   }
1057 }
1058 
1059 bool ReductionCodeGen::needCleanups(unsigned N) {
1060   const auto *PrivateVD =
1061       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1062   QualType PrivateType = PrivateVD->getType();
1063   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1064   return DTorKind != QualType::DK_none;
1065 }
1066 
1067 void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
1068                                     Address PrivateAddr) {
1069   const auto *PrivateVD =
1070       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
1071   QualType PrivateType = PrivateVD->getType();
1072   QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
1073   if (needCleanups(N)) {
1074     PrivateAddr = CGF.Builder.CreateElementBitCast(
1075         PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
1076     CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
1077   }
1078 }
1079 
1080 static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1081                           LValue BaseLV) {
1082   BaseTy = BaseTy.getNonReferenceType();
1083   while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1084          !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1085     if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
1086       BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
1087     } else {
1088       LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
1089       BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
1090     }
1091     BaseTy = BaseTy->getPointeeType();
1092   }
1093   return CGF.MakeAddrLValue(
1094       CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
1095                                        CGF.ConvertTypeForMem(ElTy)),
1096       BaseLV.getType(), BaseLV.getBaseInfo(),
1097       CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
1098 }
1099 
1100 static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
1101                           llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
1102                           llvm::Value *Addr) {
1103   Address Tmp = Address::invalid();
1104   Address TopTmp = Address::invalid();
1105   Address MostTopTmp = Address::invalid();
1106   BaseTy = BaseTy.getNonReferenceType();
1107   while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
1108          !CGF.getContext().hasSameType(BaseTy, ElTy)) {
1109     Tmp = CGF.CreateMemTemp(BaseTy);
1110     if (TopTmp.isValid())
1111       CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
1112     else
1113       MostTopTmp = Tmp;
1114     TopTmp = Tmp;
1115     BaseTy = BaseTy->getPointeeType();
1116   }
1117   llvm::Type *Ty = BaseLVType;
1118   if (Tmp.isValid())
1119     Ty = Tmp.getElementType();
1120   Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
1121   if (Tmp.isValid()) {
1122     CGF.Builder.CreateStore(Addr, Tmp);
1123     return MostTopTmp;
1124   }
1125   return Address(Addr, BaseLVAlignment);
1126 }
1127 
1128 static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
1129   const VarDecl *OrigVD = nullptr;
1130   if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(Ref)) {
1131     const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
1132     while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
1133       Base = TempOASE->getBase()->IgnoreParenImpCasts();
1134     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1135       Base = TempASE->getBase()->IgnoreParenImpCasts();
1136     DE = cast<DeclRefExpr>(Base);
1137     OrigVD = cast<VarDecl>(DE->getDecl());
1138   } else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
1139     const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
1140     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
1141       Base = TempASE->getBase()->IgnoreParenImpCasts();
1142     DE = cast<DeclRefExpr>(Base);
1143     OrigVD = cast<VarDecl>(DE->getDecl());
1144   }
1145   return OrigVD;
1146 }
1147 
1148 Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
1149                                                Address PrivateAddr) {
1150   const DeclRefExpr *DE;
1151   if (const VarDecl *OrigVD = ::getBaseDecl(ClausesData[N].Ref, DE)) {
1152     BaseDecls.emplace_back(OrigVD);
1153     LValue OriginalBaseLValue = CGF.EmitLValue(DE);
1154     LValue BaseLValue =
1155         loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
1156                     OriginalBaseLValue);
1157     llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
1158         BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
1159     llvm::Value *PrivatePointer =
1160         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1161             PrivateAddr.getPointer(),
1162             SharedAddresses[N].first.getAddress().getType());
1163     llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
1164     return castToBase(CGF, OrigVD->getType(),
1165                       SharedAddresses[N].first.getType(),
1166                       OriginalBaseLValue.getAddress().getType(),
1167                       OriginalBaseLValue.getAlignment(), Ptr);
1168   }
1169   BaseDecls.emplace_back(
1170       cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
1171   return PrivateAddr;
1172 }
1173 
1174 bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
1175   const OMPDeclareReductionDecl *DRD =
1176       getReductionInit(ClausesData[N].ReductionOp);
1177   return DRD && DRD->getInitializer();
1178 }
1179 
1180 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
1181   return CGF.EmitLoadOfPointerLValue(
1182       CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1183       getThreadIDVariable()->getType()->castAs<PointerType>());
1184 }
1185 
1186 void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
1187   if (!CGF.HaveInsertPoint())
1188     return;
1189   // 1.2.2 OpenMP Language Terminology
1190   // Structured block - An executable statement with a single entry at the
1191   // top and a single exit at the bottom.
1192   // The point of exit cannot be a branch out of the structured block.
1193   // longjmp() and throw() must not violate the entry/exit criteria.
1194   CGF.EHStack.pushTerminate();
1195   CodeGen(CGF);
1196   CGF.EHStack.popTerminate();
1197 }
1198 
1199 LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1200     CodeGenFunction &CGF) {
1201   return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
1202                             getThreadIDVariable()->getType(),
1203                             AlignmentSource::Decl);
1204 }
1205 
1206 static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
1207                                        QualType FieldTy) {
1208   auto *Field = FieldDecl::Create(
1209       C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
1210       C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
1211       /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
1212   Field->setAccess(AS_public);
1213   DC->addDecl(Field);
1214   return Field;
1215 }
1216 
1217 CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
1218                                  StringRef Separator)
1219     : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
1220       OffloadEntriesInfoManager(CGM) {
1221   ASTContext &C = CGM.getContext();
1222   RecordDecl *RD = C.buildImplicitRecord("ident_t");
1223   QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
1224   RD->startDefinition();
1225   // reserved_1
1226   addFieldToRecordDecl(C, RD, KmpInt32Ty);
1227   // flags
1228   addFieldToRecordDecl(C, RD, KmpInt32Ty);
1229   // reserved_2
1230   addFieldToRecordDecl(C, RD, KmpInt32Ty);
1231   // reserved_3
1232   addFieldToRecordDecl(C, RD, KmpInt32Ty);
1233   // psource
1234   addFieldToRecordDecl(C, RD, C.VoidPtrTy);
1235   RD->completeDefinition();
1236   IdentQTy = C.getRecordType(RD);
1237   IdentTy = CGM.getTypes().ConvertRecordDeclType(RD);
1238   KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
1239 
1240   loadOffloadInfoMetadata();
1241 }
1242 
1243 void CGOpenMPRuntime::clear() {
1244   InternalVars.clear();
1245 }
1246 
1247 std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
1248   SmallString<128> Buffer;
1249   llvm::raw_svector_ostream OS(Buffer);
1250   StringRef Sep = FirstSeparator;
1251   for (StringRef Part : Parts) {
1252     OS << Sep << Part;
1253     Sep = Separator;
1254   }
1255   return OS.str();
1256 }
1257 
1258 static llvm::Function *
1259 emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
1260                           const Expr *CombinerInitializer, const VarDecl *In,
1261                           const VarDecl *Out, bool IsCombiner) {
  // Emits void .omp_combiner.(Ty *omp_out, Ty *omp_in) (or the corresponding
  // .omp_initializer.); note that the 'out' parameter comes first.
1263   ASTContext &C = CGM.getContext();
1264   QualType PtrTy = C.getPointerType(Ty).withRestrict();
1265   FunctionArgList Args;
1266   ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
1267                                /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1268   ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
1269                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
1270   Args.push_back(&OmpOutParm);
1271   Args.push_back(&OmpInParm);
1272   const CGFunctionInfo &FnInfo =
1273       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1274   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
1275   std::string Name = CGM.getOpenMPRuntime().getName(
1276       {IsCombiner ? "omp_combiner" : "omp_initializer", ""});
1277   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1278                                     Name, &CGM.getModule());
1279   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
1280   Fn->removeFnAttr(llvm::Attribute::NoInline);
1281   Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1282   Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1283   CodeGenFunction CGF(CGM);
1284   // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
1285   // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
1286   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
1287                     Out->getLocation());
1288   CodeGenFunction::OMPPrivateScope Scope(CGF);
1289   Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
1290   Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
1291     return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
1292         .getAddress();
1293   });
1294   Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
1295   Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
1296     return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
1297         .getAddress();
1298   });
1299   (void)Scope.Privatize();
1300   if (!IsCombiner && Out->hasInit() &&
1301       !CGF.isTrivialInitializer(Out->getInit())) {
1302     CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
1303                          Out->getType().getQualifiers(),
1304                          /*IsInitializer=*/true);
1305   }
1306   if (CombinerInitializer)
1307     CGF.EmitIgnoredExpr(CombinerInitializer);
1308   Scope.ForceCleanup();
1309   CGF.FinishFunction();
1310   return Fn;
1311 }
1312 
1313 void CGOpenMPRuntime::emitUserDefinedReduction(
1314     CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
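  // Emit the combiner (and, if present, initializer) helpers for a
  // user-defined reduction. As an illustration, a declaration such as
  //   #pragma omp declare reduction(merge : T : omp_out += omp_in)
  //       initializer(omp_priv = T())
  // is lowered to an internal "omp_combiner" helper evaluating the combiner
  // on the omp_in/omp_out variables and an "omp_initializer" helper
  // evaluating the initializer on the omp_orig/omp_priv variables.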
1315   if (UDRMap.count(D) > 0)
1316     return;
1317   ASTContext &C = CGM.getContext();
1318   if (!In || !Out) {
1319     In = &C.Idents.get("omp_in");
1320     Out = &C.Idents.get("omp_out");
1321   }
1322   llvm::Function *Combiner = emitCombinerOrInitializer(
1323       CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
1324       cast<VarDecl>(D->lookup(Out).front()),
1325       /*IsCombiner=*/true);
1326   llvm::Function *Initializer = nullptr;
1327   if (const Expr *Init = D->getInitializer()) {
1328     if (!Priv || !Orig) {
1329       Priv = &C.Idents.get("omp_priv");
1330       Orig = &C.Idents.get("omp_orig");
1331     }
1332     Initializer = emitCombinerOrInitializer(
1333         CGM, D->getType(),
1334         D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
1335                                                                      : nullptr,
1336         cast<VarDecl>(D->lookup(Orig).front()),
1337         cast<VarDecl>(D->lookup(Priv).front()),
1338         /*IsCombiner=*/false);
1339   }
1340   UDRMap.try_emplace(D, Combiner, Initializer);
1341   if (CGF) {
1342     auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
1343     Decls.second.push_back(D);
1344   }
1345 }
1346 
1347 std::pair<llvm::Function *, llvm::Function *>
1348 CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
1349   auto I = UDRMap.find(D);
1350   if (I != UDRMap.end())
1351     return I->second;
1352   emitUserDefinedReduction(/*CGF=*/nullptr, D);
1353   return UDRMap.lookup(D);
1354 }
1355 
1356 static llvm::Value *emitParallelOrTeamsOutlinedFunction(
1357     CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
1358     const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
1359     const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
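  // Outline the body of a 'parallel' or 'teams' region: the captured
  // statement is emitted through CodeGen via a CGOpenMPOutlinedRegionInfo,
  // with cancellation support propagated from the innermost parallel-like
  // directive.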
1360   assert(ThreadIDVar->getType()->isPointerType() &&
1361          "thread id variable must be of type kmp_int32 *");
1362   CodeGenFunction CGF(CGM, true);
1363   bool HasCancel = false;
1364   if (const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1365     HasCancel = OPD->hasCancel();
1366   else if (const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1367     HasCancel = OPSD->hasCancel();
1368   else if (const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1369     HasCancel = OPFD->hasCancel();
1370   else if (const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1371     HasCancel = OPFD->hasCancel();
1372   else if (const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1373     HasCancel = OPFD->hasCancel();
1374   else if (const auto *OPFD =
1375                dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1376     HasCancel = OPFD->hasCancel();
1377   else if (const auto *OPFD =
1378                dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1379     HasCancel = OPFD->hasCancel();
1380   CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
1381                                     HasCancel, OutlinedHelperName);
1382   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1383   return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
1384 }
1385 
1386 llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
1387     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1388     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1389   const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1390   return emitParallelOrTeamsOutlinedFunction(
1391       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1392 }
1393 
1394 llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
1395     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1396     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1397   const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1398   return emitParallelOrTeamsOutlinedFunction(
1399       CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
1400 }
1401 
1402 llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
1403     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1404     const VarDecl *PartIDVar, const VarDecl *TaskTVar,
1405     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1406     bool Tied, unsigned &NumberOfParts) {
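  // Outline the task body. For untied tasks, PartIDVar tracks which part of
  // the task to resume and UntiedCodeGen re-enqueues the task through
  // __kmpc_omp_task after each part; NumberOfParts reports how many parts
  // were generated.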
1407   auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
1408                                               PrePostActionTy &) {
1409     llvm::Value *ThreadID = getThreadID(CGF, D.getLocStart());
1410     llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
1411     llvm::Value *TaskArgs[] = {
1412         UpLoc, ThreadID,
1413         CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1414                                     TaskTVar->getType()->castAs<PointerType>())
1415             .getPointer()};
1416     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
1417   };
1418   CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1419                                                             UntiedCodeGen);
1420   CodeGen.setAction(Action);
1421   assert(!ThreadIDVar->getType()->isPointerType() &&
1422          "thread id variable must be of type kmp_int32 for tasks");
1423   const OpenMPDirectiveKind Region =
1424       isOpenMPTaskLoopDirective(D.getDirectiveKind()) ? OMPD_taskloop
1425                                                       : OMPD_task;
1426   const CapturedStmt *CS = D.getCapturedStmt(Region);
1427   const auto *TD = dyn_cast<OMPTaskDirective>(&D);
1428   CodeGenFunction CGF(CGM, true);
1429   CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
1430                                         InnermostKind,
1431                                         TD ? TD->hasCancel() : false, Action);
1432   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
1433   llvm::Value *Res = CGF.GenerateCapturedStmtFunction(*CS);
1434   if (!Tied)
1435     NumberOfParts = Action.getNumberOfParts();
1436   return Res;
1437 }
1438 
1439 static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
1440                              const RecordDecl *RD, const CGRecordLayout &RL,
1441                              ArrayRef<llvm::Constant *> Data) {
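  // Emit the given constants in LLVM field order, filling any padding fields
  // the record layout introduced in between with null constants.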
1442   llvm::StructType *StructTy = RL.getLLVMType();
1443   unsigned PrevIdx = 0;
1445   auto DI = Data.begin();
1446   for (const FieldDecl *FD : RD->fields()) {
1447     unsigned Idx = RL.getLLVMFieldNo(FD);
    // Fill any padding fields (inserted for alignment) with null constants.
1449     for (unsigned I = PrevIdx; I < Idx; ++I)
1450       Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
1451     PrevIdx = Idx + 1;
1452     Fields.add(*DI);
1453     ++DI;
1454   }
1455 }
1456 
1457 template <class... As>
1458 static llvm::GlobalVariable *
1459 createConstantGlobalStruct(CodeGenModule &CGM, QualType Ty,
1460                            ArrayRef<llvm::Constant *> Data, const Twine &Name,
1461                            As &&... Args) {
1462   const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1463   const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1464   ConstantInitBuilder CIBuilder(CGM);
1465   ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
1466   buildStructValue(Fields, CGM, RD, RL, Data);
1467   return Fields.finishAndCreateGlobal(
1468       Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty),
1469       /*isConstant=*/true, std::forward<As>(Args)...);
1470 }
1471 
1472 template <typename T>
1473 void createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
1474                                               ArrayRef<llvm::Constant *> Data,
1475                                               T &Parent) {
1476   const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
1477   const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
1478   ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
1479   buildStructValue(Fields, CGM, RD, RL, Data);
1480   Fields.finishAndAddTo(Parent);
1481 }
1482 
1483 Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
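  // Return (creating it on first use) a module-level ident_t constant with
  // the given flags; its psource field points at the ";unknown;unknown;0;0;;"
  // fallback string, for call sites that need no precise source location.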
1484   CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1485   llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
1486   if (!Entry) {
1487     if (!DefaultOpenMPPSource) {
      // Initialize the default location string used for the psource field of
      // all default ident_t objects. Format is ";file;function;line;column;;".
1490       // Taken from
1491       // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
1492       DefaultOpenMPPSource =
1493           CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
1494       DefaultOpenMPPSource =
1495           llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
1496     }
1497 
1498     llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1499                               llvm::ConstantInt::get(CGM.Int32Ty, Flags),
1500                               llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1501                               llvm::ConstantInt::getNullValue(CGM.Int32Ty),
1502                               DefaultOpenMPPSource};
1503     llvm::GlobalValue *DefaultOpenMPLocation = createConstantGlobalStruct(
1504         CGM, IdentQTy, Data, "", llvm::GlobalValue::PrivateLinkage);
1505     DefaultOpenMPLocation->setUnnamedAddr(
1506         llvm::GlobalValue::UnnamedAddr::Global);
1507 
1508     OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
1509   }
1510   return Address(Entry, Align);
1511 }
1512 
1513 llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
1514                                                  SourceLocation Loc,
1515                                                  unsigned Flags) {
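  // Build (or reuse) the ident_t describing Loc. Without debug info the
  // shared default location is returned; otherwise a per-function
  // ".kmpc_loc.addr" temporary is initialized from the default location and
  // its psource field is overwritten with the actual source position below.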
1516   Flags |= OMP_IDENT_KMPC;
  // If no debug info is generated, return the global default location.
1518   if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
1519       Loc.isInvalid())
1520     return getOrCreateDefaultLocation(Flags).getPointer();
1521 
1522   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1523 
1524   CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
1525   Address LocValue = Address::invalid();
1526   auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1527   if (I != OpenMPLocThreadIDMap.end())
1528     LocValue = Address(I->second.DebugLoc, Align);
1529 
  // OpenMPLocThreadIDMap may contain a null DebugLoc and a non-null ThreadID
  // if getThreadID was called before this routine.
1532   if (!LocValue.isValid()) {
1533     // Generate "ident_t .kmpc_loc.addr;"
1534     Address AI = CGF.CreateMemTemp(IdentQTy, ".kmpc_loc.addr");
1535     auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1536     Elem.second.DebugLoc = AI.getPointer();
1537     LocValue = AI;
1538 
1539     CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1540     CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1541     CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
1542                              CGF.getTypeSize(IdentQTy));
1543   }
1544 
  // char **psource = &.kmpc_loc.addr.psource;
1546   LValue Base = CGF.MakeAddrLValue(LocValue, IdentQTy);
1547   auto Fields = cast<RecordDecl>(IdentQTy->getAsTagDecl())->field_begin();
1548   LValue PSource =
1549       CGF.EmitLValueForField(Base, *std::next(Fields, IdentField_PSource));
1550 
1551   llvm::Value *OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
1552   if (OMPDebugLoc == nullptr) {
1553     SmallString<128> Buffer2;
1554     llvm::raw_svector_ostream OS2(Buffer2);
1555     // Build debug location
1556     PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
1557     OS2 << ";" << PLoc.getFilename() << ";";
1558     if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
1559       OS2 << FD->getQualifiedNameAsString();
1560     OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
1561     OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
1562     OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
1563   }
1564   // *psource = ";<File>;<Function>;<Line>;<Column>;;";
1565   CGF.EmitStoreOfScalar(OMPDebugLoc, PSource);
1566 
1567   // Our callers always pass this to a runtime function, so for
1568   // convenience, go ahead and return a naked pointer.
1569   return LocValue.getPointer();
1570 }
1571 
1572 llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
1573                                           SourceLocation Loc) {
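  // Prefer the thread id parameter of the enclosing outlined region when one
  // is available; otherwise call __kmpc_global_thread_num once in the entry
  // block and cache the result in OpenMPLocThreadIDMap.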
1574   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1575 
1576   llvm::Value *ThreadID = nullptr;
1577   // Check whether we've already cached a load of the thread id in this
1578   // function.
1579   auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
1580   if (I != OpenMPLocThreadIDMap.end()) {
1581     ThreadID = I->second.ThreadID;
1582     if (ThreadID != nullptr)
1583       return ThreadID;
1584   }
  // If exceptions are enabled, do not use the thread id parameter, to avoid a
  // possible crash.
1586   if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
1587       !CGF.getLangOpts().CXXExceptions ||
1588       CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1589     if (auto *OMPRegionInfo =
1590             dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
1591       if (OMPRegionInfo->getThreadIDVariable()) {
        // Check if this is an outlined function with the thread id passed as
        // an argument.
1593         LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1594         ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
        // If the value was loaded in the entry block, cache it and reuse it
        // everywhere in the function.
1597         if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
1598           auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1599           Elem.second.ThreadID = ThreadID;
1600         }
1601         return ThreadID;
1602       }
1603     }
1604   }
1605 
  // This is not an outlined function region - we need to call kmp_int32
  // __kmpc_global_thread_num(ident_t *loc).
1608   // Generate thread id value and cache this value for use across the
1609   // function.
1610   CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
1611   CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
1612   llvm::CallInst *Call = CGF.Builder.CreateCall(
1613       createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
1614       emitUpdateLocation(CGF, Loc));
1615   Call->setCallingConv(CGF.getRuntimeCC());
1616   auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
1617   Elem.second.ThreadID = Call;
1618   return Call;
1619 }
1620 
1621 void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
1622   assert(CGF.CurFn && "No function in current CodeGenFunction.");
1623   if (OpenMPLocThreadIDMap.count(CGF.CurFn))
1624     OpenMPLocThreadIDMap.erase(CGF.CurFn);
1625   if (FunctionUDRMap.count(CGF.CurFn) > 0) {
1626     for(auto *D : FunctionUDRMap[CGF.CurFn])
1627       UDRMap.erase(D);
1628     FunctionUDRMap.erase(CGF.CurFn);
1629   }
1630 }
1631 
1632 llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
1633   return IdentTy->getPointerTo();
1634 }
1635 
1636 llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
1637   if (!Kmpc_MicroTy) {
1638     // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
1639     llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
1640                                  llvm::PointerType::getUnqual(CGM.Int32Ty)};
1641     Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
1642   }
1643   return llvm::PointerType::getUnqual(Kmpc_MicroTy);
1644 }
1645 
1646 llvm::Constant *
1647 CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
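  // Return (creating it on first use) the declaration of the requested
  // libomp/libomptarget entry point. Each case below quotes the C prototype
  // the constructed llvm::FunctionType is intended to match.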
1648   llvm::Constant *RTLFn = nullptr;
1649   switch (static_cast<OpenMPRTLFunction>(Function)) {
1650   case OMPRTL__kmpc_fork_call: {
1651     // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
1652     // microtask, ...);
1653     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1654                                 getKmpc_MicroPointerTy()};
1655     auto *FnTy =
1656         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
1657     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
1658     break;
1659   }
1660   case OMPRTL__kmpc_global_thread_num: {
1661     // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
1662     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1663     auto *FnTy =
1664         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1665     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
1666     break;
1667   }
1668   case OMPRTL__kmpc_threadprivate_cached: {
1669     // Build void *__kmpc_threadprivate_cached(ident_t *loc,
1670     // kmp_int32 global_tid, void *data, size_t size, void ***cache);
1671     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1672                                 CGM.VoidPtrTy, CGM.SizeTy,
1673                                 CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
1674     auto *FnTy =
1675         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
1676     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
1677     break;
1678   }
1679   case OMPRTL__kmpc_critical: {
1680     // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
1681     // kmp_critical_name *crit);
1682     llvm::Type *TypeParams[] = {
1683         getIdentTyPointerTy(), CGM.Int32Ty,
1684         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1685     auto *FnTy =
1686         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1687     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
1688     break;
1689   }
1690   case OMPRTL__kmpc_critical_with_hint: {
1691     // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
1692     // kmp_critical_name *crit, uintptr_t hint);
1693     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1694                                 llvm::PointerType::getUnqual(KmpCriticalNameTy),
1695                                 CGM.IntPtrTy};
1696     auto *FnTy =
1697         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1698     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
1699     break;
1700   }
1701   case OMPRTL__kmpc_threadprivate_register: {
1702     // Build void __kmpc_threadprivate_register(ident_t *, void *data,
1703     // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
1704     // typedef void *(*kmpc_ctor)(void *);
1705     auto *KmpcCtorTy =
1706         llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
1707                                 /*isVarArg*/ false)->getPointerTo();
1708     // typedef void *(*kmpc_cctor)(void *, void *);
1709     llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1710     auto *KmpcCopyCtorTy =
1711         llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
1712                                 /*isVarArg*/ false)
1713             ->getPointerTo();
1714     // typedef void (*kmpc_dtor)(void *);
1715     auto *KmpcDtorTy =
1716         llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
1717             ->getPointerTo();
1718     llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
1719                               KmpcCopyCtorTy, KmpcDtorTy};
1720     auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
1721                                         /*isVarArg*/ false);
1722     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
1723     break;
1724   }
1725   case OMPRTL__kmpc_end_critical: {
1726     // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
1727     // kmp_critical_name *crit);
1728     llvm::Type *TypeParams[] = {
1729         getIdentTyPointerTy(), CGM.Int32Ty,
1730         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1731     auto *FnTy =
1732         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1733     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
1734     break;
1735   }
1736   case OMPRTL__kmpc_cancel_barrier: {
1737     // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
1738     // global_tid);
1739     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1740     auto *FnTy =
1741         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1742     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
1743     break;
1744   }
1745   case OMPRTL__kmpc_barrier: {
1746     // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1747     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1748     auto *FnTy =
1749         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1750     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1751     break;
1752   }
1753   case OMPRTL__kmpc_for_static_fini: {
1754     // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
1755     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1756     auto *FnTy =
1757         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1758     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
1759     break;
1760   }
1761   case OMPRTL__kmpc_push_num_threads: {
1762     // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
1763     // kmp_int32 num_threads)
1764     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1765                                 CGM.Int32Ty};
1766     auto *FnTy =
1767         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1768     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
1769     break;
1770   }
1771   case OMPRTL__kmpc_serialized_parallel: {
1772     // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1773     // global_tid);
1774     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1775     auto *FnTy =
1776         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1777     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1778     break;
1779   }
1780   case OMPRTL__kmpc_end_serialized_parallel: {
1781     // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1782     // global_tid);
1783     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1784     auto *FnTy =
1785         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1786     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1787     break;
1788   }
1789   case OMPRTL__kmpc_flush: {
1790     // Build void __kmpc_flush(ident_t *loc);
1791     llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
1792     auto *FnTy =
1793         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1794     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
1795     break;
1796   }
1797   case OMPRTL__kmpc_master: {
1798     // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
1799     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1800     auto *FnTy =
1801         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1802     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
1803     break;
1804   }
1805   case OMPRTL__kmpc_end_master: {
1806     // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
1807     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1808     auto *FnTy =
1809         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1810     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
1811     break;
1812   }
1813   case OMPRTL__kmpc_omp_taskyield: {
1814     // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
1815     // int end_part);
1816     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1817     auto *FnTy =
1818         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1819     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
1820     break;
1821   }
1822   case OMPRTL__kmpc_single: {
1823     // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
1824     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1825     auto *FnTy =
1826         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1827     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
1828     break;
1829   }
1830   case OMPRTL__kmpc_end_single: {
1831     // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
1832     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1833     auto *FnTy =
1834         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1835     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
1836     break;
1837   }
1838   case OMPRTL__kmpc_omp_task_alloc: {
1839     // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
1840     // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1841     // kmp_routine_entry_t *task_entry);
1842     assert(KmpRoutineEntryPtrTy != nullptr &&
1843            "Type kmp_routine_entry_t must be created.");
1844     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
1845                                 CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
1846     // Return void * and then cast to particular kmp_task_t type.
1847     auto *FnTy =
1848         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1849     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
1850     break;
1851   }
1852   case OMPRTL__kmpc_omp_task: {
1853     // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
1854     // *new_task);
1855     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1856                                 CGM.VoidPtrTy};
1857     auto *FnTy =
1858         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1859     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
1860     break;
1861   }
1862   case OMPRTL__kmpc_copyprivate: {
1863     // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
1864     // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
1865     // kmp_int32 didit);
1866     llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1867     auto *CpyFnTy =
1868         llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
1869     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
1870                                 CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
1871                                 CGM.Int32Ty};
1872     auto *FnTy =
1873         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1874     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
1875     break;
1876   }
1877   case OMPRTL__kmpc_reduce: {
1878     // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
1879     // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
1880     // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
1881     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1882     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1883                                                /*isVarArg=*/false);
1884     llvm::Type *TypeParams[] = {
1885         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1886         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1887         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1888     auto *FnTy =
1889         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1890     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
1891     break;
1892   }
1893   case OMPRTL__kmpc_reduce_nowait: {
1894     // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
1895     // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
1896     // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
1897     // *lck);
1898     llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
1899     auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
1900                                                /*isVarArg=*/false);
1901     llvm::Type *TypeParams[] = {
1902         getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
1903         CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
1904         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1905     auto *FnTy =
1906         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1907     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
1908     break;
1909   }
1910   case OMPRTL__kmpc_end_reduce: {
1911     // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
1912     // kmp_critical_name *lck);
1913     llvm::Type *TypeParams[] = {
1914         getIdentTyPointerTy(), CGM.Int32Ty,
1915         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1916     auto *FnTy =
1917         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1918     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
1919     break;
1920   }
1921   case OMPRTL__kmpc_end_reduce_nowait: {
    // Build void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
1923     // kmp_critical_name *lck);
1924     llvm::Type *TypeParams[] = {
1925         getIdentTyPointerTy(), CGM.Int32Ty,
1926         llvm::PointerType::getUnqual(KmpCriticalNameTy)};
1927     auto *FnTy =
1928         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1929     RTLFn =
1930         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
1931     break;
1932   }
1933   case OMPRTL__kmpc_omp_task_begin_if0: {
    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
1936     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1937                                 CGM.VoidPtrTy};
1938     auto *FnTy =
1939         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1940     RTLFn =
1941         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
1942     break;
1943   }
1944   case OMPRTL__kmpc_omp_task_complete_if0: {
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
1947     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
1948                                 CGM.VoidPtrTy};
1949     auto *FnTy =
1950         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1951     RTLFn = CGM.CreateRuntimeFunction(FnTy,
1952                                       /*Name=*/"__kmpc_omp_task_complete_if0");
1953     break;
1954   }
1955   case OMPRTL__kmpc_ordered: {
1956     // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
1957     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1958     auto *FnTy =
1959         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1960     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
1961     break;
1962   }
1963   case OMPRTL__kmpc_end_ordered: {
1964     // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
1965     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1966     auto *FnTy =
1967         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1968     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
1969     break;
1970   }
1971   case OMPRTL__kmpc_omp_taskwait: {
1972     // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
1973     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1974     auto *FnTy =
1975         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1976     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
1977     break;
1978   }
1979   case OMPRTL__kmpc_taskgroup: {
1980     // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
1981     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1982     auto *FnTy =
1983         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1984     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
1985     break;
1986   }
1987   case OMPRTL__kmpc_end_taskgroup: {
1988     // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
1989     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1990     auto *FnTy =
1991         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1992     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
1993     break;
1994   }
1995   case OMPRTL__kmpc_push_proc_bind: {
1996     // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
1997     // int proc_bind)
1998     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
1999     auto *FnTy =
2000         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2001     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
2002     break;
2003   }
2004   case OMPRTL__kmpc_omp_task_with_deps: {
2005     // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
2006     // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
2007     // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
2008     llvm::Type *TypeParams[] = {
2009         getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
2010         CGM.VoidPtrTy,         CGM.Int32Ty, CGM.VoidPtrTy};
2011     auto *FnTy =
2012         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2013     RTLFn =
2014         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
2015     break;
2016   }
2017   case OMPRTL__kmpc_omp_wait_deps: {
2018     // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
2019     // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
2020     // kmp_depend_info_t *noalias_dep_list);
2021     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2022                                 CGM.Int32Ty,           CGM.VoidPtrTy,
2023                                 CGM.Int32Ty,           CGM.VoidPtrTy};
2024     auto *FnTy =
2025         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2026     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
2027     break;
2028   }
2029   case OMPRTL__kmpc_cancellationpoint: {
2030     // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
2031     // global_tid, kmp_int32 cncl_kind)
2032     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2033     auto *FnTy =
2034         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2035     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
2036     break;
2037   }
2038   case OMPRTL__kmpc_cancel: {
2039     // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
2040     // kmp_int32 cncl_kind)
2041     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
2042     auto *FnTy =
2043         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2044     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
2045     break;
2046   }
2047   case OMPRTL__kmpc_push_num_teams: {
    // Build void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_teams, kmp_int32 num_threads);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
                                CGM.Int32Ty};
2052     auto *FnTy =
2053         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2054     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
2055     break;
2056   }
2057   case OMPRTL__kmpc_fork_teams: {
2058     // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
2059     // microtask, ...);
2060     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2061                                 getKmpc_MicroPointerTy()};
2062     auto *FnTy =
2063         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
2064     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
2065     break;
2066   }
2067   case OMPRTL__kmpc_taskloop: {
2068     // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
2069     // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
2070     // sched, kmp_uint64 grainsize, void *task_dup);
2071     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2072                                 CGM.IntTy,
2073                                 CGM.VoidPtrTy,
2074                                 CGM.IntTy,
2075                                 CGM.Int64Ty->getPointerTo(),
2076                                 CGM.Int64Ty->getPointerTo(),
2077                                 CGM.Int64Ty,
2078                                 CGM.IntTy,
2079                                 CGM.IntTy,
2080                                 CGM.Int64Ty,
2081                                 CGM.VoidPtrTy};
2082     auto *FnTy =
2083         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2084     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
2085     break;
2086   }
2087   case OMPRTL__kmpc_doacross_init: {
2088     // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
2089     // num_dims, struct kmp_dim *dims);
2090     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
2091                                 CGM.Int32Ty,
2092                                 CGM.Int32Ty,
2093                                 CGM.VoidPtrTy};
2094     auto *FnTy =
2095         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2096     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
2097     break;
2098   }
2099   case OMPRTL__kmpc_doacross_fini: {
2100     // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
2101     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
2102     auto *FnTy =
2103         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2104     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
2105     break;
2106   }
2107   case OMPRTL__kmpc_doacross_post: {
2108     // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
2109     // *vec);
2110     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2111                                 CGM.Int64Ty->getPointerTo()};
2112     auto *FnTy =
2113         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2114     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
2115     break;
2116   }
2117   case OMPRTL__kmpc_doacross_wait: {
2118     // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
2119     // *vec);
2120     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
2121                                 CGM.Int64Ty->getPointerTo()};
2122     auto *FnTy =
2123         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2124     RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
2125     break;
2126   }
2127   case OMPRTL__kmpc_task_reduction_init: {
2128     // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
2129     // *data);
2130     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
2131     auto *FnTy =
2132         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2133     RTLFn =
2134         CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
2135     break;
2136   }
2137   case OMPRTL__kmpc_task_reduction_get_th_data: {
2138     // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
2139     // *d);
2140     llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
2141     auto *FnTy =
2142         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
2143     RTLFn = CGM.CreateRuntimeFunction(
2144         FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
2145     break;
2146   }
2147   case OMPRTL__tgt_target: {
2148     // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
2149     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2150     // *arg_types);
2151     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2152                                 CGM.VoidPtrTy,
2153                                 CGM.Int32Ty,
2154                                 CGM.VoidPtrPtrTy,
2155                                 CGM.VoidPtrPtrTy,
2156                                 CGM.SizeTy->getPointerTo(),
2157                                 CGM.Int64Ty->getPointerTo()};
2158     auto *FnTy =
2159         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2160     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
2161     break;
2162   }
2163   case OMPRTL__tgt_target_nowait: {
2164     // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
2165     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2166     // int64_t *arg_types);
2167     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2168                                 CGM.VoidPtrTy,
2169                                 CGM.Int32Ty,
2170                                 CGM.VoidPtrPtrTy,
2171                                 CGM.VoidPtrPtrTy,
2172                                 CGM.SizeTy->getPointerTo(),
2173                                 CGM.Int64Ty->getPointerTo()};
2174     auto *FnTy =
2175         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2176     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
2177     break;
2178   }
2179   case OMPRTL__tgt_target_teams: {
2180     // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
2181     // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
2182     // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2183     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2184                                 CGM.VoidPtrTy,
2185                                 CGM.Int32Ty,
2186                                 CGM.VoidPtrPtrTy,
2187                                 CGM.VoidPtrPtrTy,
2188                                 CGM.SizeTy->getPointerTo(),
2189                                 CGM.Int64Ty->getPointerTo(),
2190                                 CGM.Int32Ty,
2191                                 CGM.Int32Ty};
2192     auto *FnTy =
2193         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2194     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
2195     break;
2196   }
2197   case OMPRTL__tgt_target_teams_nowait: {
2198     // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
2199     // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
2200     // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
2201     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2202                                 CGM.VoidPtrTy,
2203                                 CGM.Int32Ty,
2204                                 CGM.VoidPtrPtrTy,
2205                                 CGM.VoidPtrPtrTy,
2206                                 CGM.SizeTy->getPointerTo(),
2207                                 CGM.Int64Ty->getPointerTo(),
2208                                 CGM.Int32Ty,
2209                                 CGM.Int32Ty};
2210     auto *FnTy =
2211         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2212     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
2213     break;
2214   }
2215   case OMPRTL__tgt_register_lib: {
2216     // Build void __tgt_register_lib(__tgt_bin_desc *desc);
2217     QualType ParamTy =
2218         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2219     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2220     auto *FnTy =
2221         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2222     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
2223     break;
2224   }
2225   case OMPRTL__tgt_unregister_lib: {
2226     // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
2227     QualType ParamTy =
2228         CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
2229     llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
2230     auto *FnTy =
2231         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
2232     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
2233     break;
2234   }
2235   case OMPRTL__tgt_target_data_begin: {
2236     // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
2237     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2238     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2239                                 CGM.Int32Ty,
2240                                 CGM.VoidPtrPtrTy,
2241                                 CGM.VoidPtrPtrTy,
2242                                 CGM.SizeTy->getPointerTo(),
2243                                 CGM.Int64Ty->getPointerTo()};
2244     auto *FnTy =
2245         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2246     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
2247     break;
2248   }
2249   case OMPRTL__tgt_target_data_begin_nowait: {
2250     // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
2251     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2252     // *arg_types);
2253     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2254                                 CGM.Int32Ty,
2255                                 CGM.VoidPtrPtrTy,
2256                                 CGM.VoidPtrPtrTy,
2257                                 CGM.SizeTy->getPointerTo(),
2258                                 CGM.Int64Ty->getPointerTo()};
2259     auto *FnTy =
2260         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2261     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
2262     break;
2263   }
2264   case OMPRTL__tgt_target_data_end: {
2265     // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
2266     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2267     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2268                                 CGM.Int32Ty,
2269                                 CGM.VoidPtrPtrTy,
2270                                 CGM.VoidPtrPtrTy,
2271                                 CGM.SizeTy->getPointerTo(),
2272                                 CGM.Int64Ty->getPointerTo()};
2273     auto *FnTy =
2274         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2275     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
2276     break;
2277   }
2278   case OMPRTL__tgt_target_data_end_nowait: {
2279     // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
2280     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2281     // *arg_types);
2282     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2283                                 CGM.Int32Ty,
2284                                 CGM.VoidPtrPtrTy,
2285                                 CGM.VoidPtrPtrTy,
2286                                 CGM.SizeTy->getPointerTo(),
2287                                 CGM.Int64Ty->getPointerTo()};
2288     auto *FnTy =
2289         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2290     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
2291     break;
2292   }
2293   case OMPRTL__tgt_target_data_update: {
2294     // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
2295     // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
2296     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2297                                 CGM.Int32Ty,
2298                                 CGM.VoidPtrPtrTy,
2299                                 CGM.VoidPtrPtrTy,
2300                                 CGM.SizeTy->getPointerTo(),
2301                                 CGM.Int64Ty->getPointerTo()};
2302     auto *FnTy =
2303         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2304     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
2305     break;
2306   }
2307   case OMPRTL__tgt_target_data_update_nowait: {
2308     // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
2309     // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
2310     // *arg_types);
2311     llvm::Type *TypeParams[] = {CGM.Int64Ty,
2312                                 CGM.Int32Ty,
2313                                 CGM.VoidPtrPtrTy,
2314                                 CGM.VoidPtrPtrTy,
2315                                 CGM.SizeTy->getPointerTo(),
2316                                 CGM.Int64Ty->getPointerTo()};
2317     auto *FnTy =
2318         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2319     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
2320     break;
2321   }
2322   }
2323   assert(RTLFn && "Unable to find OpenMP runtime function");
2324   return RTLFn;
2325 }
2326 
2327 llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
2328                                                              bool IVSigned) {
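  // Select the static-init entry matching the induction variable; e.g.
  // IVSize == 32 with IVSigned picks __kmpc_for_static_init_4, which roughly
  // corresponds to:
  //   void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
  //       kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int32 *plower,
  //       kmp_int32 *pupper, kmp_int32 *pstride, kmp_int32 incr,
  //       kmp_int32 chunk);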
2329   assert((IVSize == 32 || IVSize == 64) &&
2330          "IV size is not compatible with the omp runtime");
2331   StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
2332                                             : "__kmpc_for_static_init_4u")
2333                                 : (IVSigned ? "__kmpc_for_static_init_8"
2334                                             : "__kmpc_for_static_init_8u");
2335   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2336   auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2337   llvm::Type *TypeParams[] = {
2338     getIdentTyPointerTy(),                     // loc
2339     CGM.Int32Ty,                               // tid
2340     CGM.Int32Ty,                               // schedtype
2341     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2342     PtrTy,                                     // p_lower
2343     PtrTy,                                     // p_upper
2344     PtrTy,                                     // p_stride
2345     ITy,                                       // incr
2346     ITy                                        // chunk
2347   };
2348   auto *FnTy =
2349       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2350   return CGM.CreateRuntimeFunction(FnTy, Name);
2351 }
2352 
2353 llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
2354                                                             bool IVSigned) {
2355   assert((IVSize == 32 || IVSize == 64) &&
2356          "IV size is not compatible with the omp runtime");
2357   StringRef Name =
2358       IVSize == 32
2359           ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
2360           : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
2361   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2362   llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
2363                                CGM.Int32Ty,           // tid
2364                                CGM.Int32Ty,           // schedtype
2365                                ITy,                   // lower
2366                                ITy,                   // upper
2367                                ITy,                   // stride
2368                                ITy                    // chunk
2369   };
2370   auto *FnTy =
2371       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
2372   return CGM.CreateRuntimeFunction(FnTy, Name);
2373 }
2374 
2375 llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
2376                                                             bool IVSigned) {
2377   assert((IVSize == 32 || IVSize == 64) &&
2378          "IV size is not compatible with the omp runtime");
2379   StringRef Name =
2380       IVSize == 32
2381           ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
2382           : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
2383   llvm::Type *TypeParams[] = {
2384       getIdentTyPointerTy(), // loc
2385       CGM.Int32Ty,           // tid
2386   };
2387   auto *FnTy =
2388       llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
2389   return CGM.CreateRuntimeFunction(FnTy, Name);
2390 }
2391 
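// Build (or reuse) a declaration of the __kmpc_dispatch_next_{4,4u,8,8u}
// runtime entry that fetches the next chunk of a dynamically scheduled loop.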
2392 llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
2393                                                             bool IVSigned) {
2394   assert((IVSize == 32 || IVSize == 64) &&
2395          "IV size is not compatible with the omp runtime");
2396   StringRef Name =
2397       IVSize == 32
2398           ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
2399           : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
2400   llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
2401   auto *PtrTy = llvm::PointerType::getUnqual(ITy);
2402   llvm::Type *TypeParams[] = {
2403     getIdentTyPointerTy(),                     // loc
2404     CGM.Int32Ty,                               // tid
2405     llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
2406     PtrTy,                                     // p_lower
2407     PtrTy,                                     // p_upper
2408     PtrTy                                      // p_stride
2409   };
2410   auto *FnTy =
2411       llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
2412   return CGM.CreateRuntimeFunction(FnTy, Name);
2413 }
2414 
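// For a variable marked 'declare target link', return the address of the
// compiler-generated "<mangled-name>_decl_tgt_link_ptr" pointer that refers
// to it, creating and registering the pointer on first use; otherwise return
// an invalid address.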
2415 Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
2416   if (CGM.getLangOpts().OpenMPSimd)
2417     return Address::invalid();
2418   llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2419       isDeclareTargetDeclaration(VD);
2420   if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
2421     SmallString<64> PtrName;
2422     {
2423       llvm::raw_svector_ostream OS(PtrName);
2424       OS << CGM.getMangledName(GlobalDecl(VD)) << "_decl_tgt_link_ptr";
2425     }
2426     llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
2427     if (!Ptr) {
2428       QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
2429       Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
2430                                         PtrName);
2431       if (!CGM.getLangOpts().OpenMPIsDevice) {
2432         auto *GV = cast<llvm::GlobalVariable>(Ptr);
2433         GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
2434         GV->setInitializer(CGM.GetAddrOfGlobal(VD));
2435       }
2436       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ptr));
2437       registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
2438     }
2439     return Address(Ptr, CGM.getContext().getDeclAlign(VD));
2440   }
2441   return Address::invalid();
2442 }
2443 
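// Get or create the internal cache variable that is passed to
// __kmpc_threadprivate_cached for the given threadprivate variable.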
2444 llvm::Constant *
2445 CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
2446   assert(!CGM.getLangOpts().OpenMPUseTLS ||
2447          !CGM.getContext().getTargetInfo().isTLSSupported());
2448   // Look up the entry, lazily creating it if necessary.
2449   std::string Suffix = getName({"cache", ""});
2450   return getOrCreateInternalVariable(
2451       CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
2452 }
2453 
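// Return the address of the thread-local copy of a threadprivate variable by
// calling __kmpc_threadprivate_cached, unless the target supports TLS and the
// original address can be used directly.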
2454 Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
2455                                                 const VarDecl *VD,
2456                                                 Address VDAddr,
2457                                                 SourceLocation Loc) {
2458   if (CGM.getLangOpts().OpenMPUseTLS &&
2459       CGM.getContext().getTargetInfo().isTLSSupported())
2460     return VDAddr;
2461 
2462   llvm::Type *VarTy = VDAddr.getElementType();
2463   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2464                          CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
2465                                                        CGM.Int8PtrTy),
2466                          CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
2467                          getOrCreateThreadPrivateCache(VD)};
2468   return Address(CGF.EmitRuntimeCall(
2469       createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2470                  VDAddr.getAlignment());
2471 }
2472 
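// Emit a call to __kmpc_global_thread_num to initialize the runtime, followed
// by __kmpc_threadprivate_register to register the constructor, copy
// constructor and destructor for the threadprivate variable.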
2473 void CGOpenMPRuntime::emitThreadPrivateVarInit(
2474     CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
2475     llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
2476   // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
2477   // library.
2478   llvm::Value *OMPLoc = emitUpdateLocation(CGF, Loc);
2479   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
2480                       OMPLoc);
2481   // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
2482   // to register constructor/destructor for variable.
2483   llvm::Value *Args[] = {
2484       OMPLoc, CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.VoidPtrTy),
2485       Ctor, CopyCtor, Dtor};
2486   CGF.EmitRuntimeCall(
2487       createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
2488 }
2489 
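// Emit the constructor/destructor helpers for a threadprivate variable and
// register them with the runtime. When no CodeGenFunction is supplied, the
// registration is wrapped in a dedicated "__omp_threadprivate_init_" function
// that is returned to the caller.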
2490 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
2491     const VarDecl *VD, Address VDAddr, SourceLocation Loc,
2492     bool PerformInit, CodeGenFunction *CGF) {
2493   if (CGM.getLangOpts().OpenMPUseTLS &&
2494       CGM.getContext().getTargetInfo().isTLSSupported())
2495     return nullptr;
2496 
2497   VD = VD->getDefinition(CGM.getContext());
2498   if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
2499     ThreadPrivateWithDefinition.insert(VD);
2500     QualType ASTTy = VD->getType();
2501 
2502     llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
2503     const Expr *Init = VD->getAnyInitializer();
2504     if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2505       // Generate a function that re-emits the declaration's initializer into
2506       // the threadprivate copy of the variable VD.
2507       CodeGenFunction CtorCGF(CGM);
2508       FunctionArgList Args;
2509       ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2510                             /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2511                             ImplicitParamDecl::Other);
2512       Args.push_back(&Dst);
2513 
2514       const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2515           CGM.getContext().VoidPtrTy, Args);
2516       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2517       std::string Name = getName({"__kmpc_global_ctor_", ""});
2518       llvm::Function *Fn =
2519           CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2520       CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
2521                             Args, Loc, Loc);
2522       llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
2523           CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2524           CGM.getContext().VoidPtrTy, Dst.getLocation());
2525       Address Arg = Address(ArgVal, VDAddr.getAlignment());
2526       Arg = CtorCGF.Builder.CreateElementBitCast(
2527           Arg, CtorCGF.ConvertTypeForMem(ASTTy));
2528       CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
2529                                /*IsInitializer=*/true);
2530       ArgVal = CtorCGF.EmitLoadOfScalar(
2531           CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
2532           CGM.getContext().VoidPtrTy, Dst.getLocation());
2533       CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
2534       CtorCGF.FinishFunction();
2535       Ctor = Fn;
2536     }
2537     if (VD->getType().isDestructedType() != QualType::DK_none) {
2538       // Generate a function that emits a destructor call for the threadprivate
2539       // copy of the variable VD.
2540       CodeGenFunction DtorCGF(CGM);
2541       FunctionArgList Args;
2542       ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
2543                             /*Id=*/nullptr, CGM.getContext().VoidPtrTy,
2544                             ImplicitParamDecl::Other);
2545       Args.push_back(&Dst);
2546 
2547       const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2548           CGM.getContext().VoidTy, Args);
2549       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2550       std::string Name = getName({"__kmpc_global_dtor_", ""});
2551       llvm::Function *Fn =
2552           CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, Loc);
2553       auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2554       DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
2555                             Loc, Loc);
2556       // Create a scope with an artificial location for the body of this function.
2557       auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2558       llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
2559           DtorCGF.GetAddrOfLocalVar(&Dst),
2560           /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
2561       DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
2562                           DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2563                           DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2564       DtorCGF.FinishFunction();
2565       Dtor = Fn;
2566     }
2567     // Do not emit init function if it is not required.
2568     if (!Ctor && !Dtor)
2569       return nullptr;
2570 
2571     llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
2572     auto *CopyCtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
2573                                                /*isVarArg=*/false)
2574                            ->getPointerTo();
2575     // Copying constructor for the threadprivate variable.
2576     // Must be NULL: this parameter is reserved by the runtime, which currently
2577     // requires it to always be NULL and fires an assertion otherwise.
2578     CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
2579     if (Ctor == nullptr) {
2580       auto *CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
2581                                              /*isVarArg=*/false)
2582                          ->getPointerTo();
2583       Ctor = llvm::Constant::getNullValue(CtorTy);
2584     }
2585     if (Dtor == nullptr) {
2586       auto *DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
2587                                              /*isVarArg=*/false)
2588                          ->getPointerTo();
2589       Dtor = llvm::Constant::getNullValue(DtorTy);
2590     }
2591     if (!CGF) {
2592       auto *InitFunctionTy =
2593           llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2594       std::string Name = getName({"__omp_threadprivate_init_", ""});
2595       llvm::Function *InitFunction = CGM.CreateGlobalInitOrDestructFunction(
2596           InitFunctionTy, Name, CGM.getTypes().arrangeNullaryFunction());
2597       CodeGenFunction InitCGF(CGM);
2598       FunctionArgList ArgList;
2599       InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
2600                             CGM.getTypes().arrangeNullaryFunction(), ArgList,
2601                             Loc, Loc);
2602       emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2603       InitCGF.FinishFunction();
2604       return InitFunction;
2605     }
2606     emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
2607   }
2608   return nullptr;
2609 }
2610 
2611 /// Obtain information that uniquely identifies a target entry. This
2612 /// consists of the file and device IDs as well as the line number associated
2613 /// with the relevant entry source location.
2614 static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
2615                                      unsigned &DeviceID, unsigned &FileID,
2616                                      unsigned &LineNum) {
2617   SourceManager &SM = C.getSourceManager();
2618 
2619   // The loc should always be valid and have a file ID (the user cannot use
2620   // #pragma directives in macros).
2621 
2622   assert(Loc.isValid() && "Source location is expected to be always valid.");
2623 
2624   PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2625   assert(PLoc.isValid() && "Source location is expected to be always valid.");
2626 
2627   llvm::sys::fs::UniqueID ID;
2628   if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
2629     SM.getDiagnostics().Report(diag::err_cannot_open_file)
2630         << PLoc.getFilename() << EC.message();
2631 
2632   DeviceID = ID.getDevice();
2633   FileID = ID.getFile();
2634   LineNum = PLoc.getLine();
2635 }
2636 
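// Emit and register offload entries for the constructor and/or destructor of
// a 'declare target' (non-link) variable definition. The result is true only
// when compiling device code.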
2637 bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
2638                                                      llvm::GlobalVariable *Addr,
2639                                                      bool PerformInit) {
2640   Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2641       isDeclareTargetDeclaration(VD);
2642   if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
2643     return false;
2644   VD = VD->getDefinition(CGM.getContext());
2645   if (VD && !DeclareTargetWithDefinition.insert(VD).second)
2646     return CGM.getLangOpts().OpenMPIsDevice;
2647 
2648   QualType ASTTy = VD->getType();
2649 
2650   SourceLocation Loc = VD->getCanonicalDecl()->getLocStart();
2651   // Produce the unique prefix to identify the new target regions. We use
2652   // the source location of the variable declaration, which we know does not
2653   // conflict with any target region.
2654   unsigned DeviceID;
2655   unsigned FileID;
2656   unsigned Line;
2657   getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
2658   SmallString<128> Buffer, Out;
2659   {
2660     llvm::raw_svector_ostream OS(Buffer);
2661     OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
2662        << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
2663   }
2664 
2665   const Expr *Init = VD->getAnyInitializer();
2666   if (CGM.getLangOpts().CPlusPlus && PerformInit) {
2667     llvm::Constant *Ctor;
2668     llvm::Constant *ID;
2669     if (CGM.getLangOpts().OpenMPIsDevice) {
2670       // Generate a function that re-emits the declaration's initializer into
2671       // the device copy of the variable VD.
2672       CodeGenFunction CtorCGF(CGM);
2673 
2674       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2675       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2676       llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2677           FTy, Twine(Buffer, "_ctor"), FI, Loc);
2678       auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
2679       CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2680                             FunctionArgList(), Loc, Loc);
2681       auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
2682       CtorCGF.EmitAnyExprToMem(Init,
2683                                Address(Addr, CGM.getContext().getDeclAlign(VD)),
2684                                Init->getType().getQualifiers(),
2685                                /*IsInitializer=*/true);
2686       CtorCGF.FinishFunction();
2687       Ctor = Fn;
2688       ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2689       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
2690     } else {
2691       Ctor = new llvm::GlobalVariable(
2692           CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2693           llvm::GlobalValue::PrivateLinkage,
2694           llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
2695       ID = Ctor;
2696     }
2697 
2698     // Register the information for the entry associated with the constructor.
2699     Out.clear();
2700     OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2701         DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
2702         ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
2703   }
2704   if (VD->getType().isDestructedType() != QualType::DK_none) {
2705     llvm::Constant *Dtor;
2706     llvm::Constant *ID;
2707     if (CGM.getLangOpts().OpenMPIsDevice) {
2708       // Generate a function that emits a destructor call for the device copy
2709       // of the variable VD.
2710       CodeGenFunction DtorCGF(CGM);
2711 
2712       const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2713       llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
2714       llvm::Function *Fn = CGM.CreateGlobalInitOrDestructFunction(
2715           FTy, Twine(Buffer, "_dtor"), FI, Loc);
2716       auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
2717       DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
2718                             FunctionArgList(), Loc, Loc);
2719       // Create a scope with an artificial location for the body of this
2720       // function.
2721       auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
2722       DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
2723                           ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
2724                           DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
2725       DtorCGF.FinishFunction();
2726       Dtor = Fn;
2727       ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
2728       CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
2729     } else {
2730       Dtor = new llvm::GlobalVariable(
2731           CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
2732           llvm::GlobalValue::PrivateLinkage,
2733           llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
2734       ID = Dtor;
2735     }
2736     // Register the information for the entry associated with the destructor.
2737     Out.clear();
2738     OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
2739         DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
2740         ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
2741   }
2742   return CGM.getLangOpts().OpenMPIsDevice;
2743 }
2744 
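// Create a compiler-generated (artificial) threadprivate variable with the
// given name and type, and return the address of its thread-local copy
// obtained from __kmpc_threadprivate_cached.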
2745 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
2746                                                           QualType VarType,
2747                                                           StringRef Name) {
2748   std::string Suffix = getName({"artificial", ""});
2749   std::string CacheSuffix = getName({"cache", ""});
2750   llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
2751   llvm::Value *GAddr =
2752       getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
2753   llvm::Value *Args[] = {
2754       emitUpdateLocation(CGF, SourceLocation()),
2755       getThreadID(CGF, SourceLocation()),
2756       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
2757       CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
2758                                 /*IsSigned=*/false),
2759       getOrCreateInternalVariable(
2760           CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
2761   return Address(
2762       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2763           CGF.EmitRuntimeCall(
2764               createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
2765           VarLVType->getPointerTo(/*AddrSpace=*/0)),
2766       CGM.getPointerAlign());
2767 }
2768 
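// Emit code for an OpenMP 'if' clause: constant-fold the condition when
// possible and emit only the live arm, otherwise emit an explicit
// omp_if.then/omp_if.else branch.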
2769 void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
2770                                       const RegionCodeGenTy &ThenGen,
2771                                       const RegionCodeGenTy &ElseGen) {
2772   CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
2773 
2774   // If the condition constant folds and can be elided, try to avoid emitting
2775   // the condition and the dead arm of the if/else.
2776   bool CondConstant;
2777   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
2778     if (CondConstant)
2779       ThenGen(CGF);
2780     else
2781       ElseGen(CGF);
2782     return;
2783   }
2784 
2785   // Otherwise, the condition did not fold, or we couldn't elide it.  Just
2786   // emit the conditional branch.
2787   llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("omp_if.then");
2788   llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
2789   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("omp_if.end");
2790   CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
2791 
2792   // Emit the 'then' code.
2793   CGF.EmitBlock(ThenBlock);
2794   ThenGen(CGF);
2795   CGF.EmitBranch(ContBlock);
2796   // Emit the 'else' code if present.
2797   // There is no need to emit line number for unconditional branch.
2798   (void)ApplyDebugLocation::CreateEmpty(CGF);
2799   CGF.EmitBlock(ElseBlock);
2800   ElseGen(CGF);
2801   // There is no need to emit line number for unconditional branch.
2802   (void)ApplyDebugLocation::CreateEmpty(CGF);
2803   CGF.EmitBranch(ContBlock);
2804   // Emit the continuation block for code after the if.
2805   CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
2806 }
2807 
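// Emit code for a 'parallel' directive: either a __kmpc_fork_call of the
// outlined function, or, when the 'if' clause evaluates to false, a
// serialized execution of the outlined function guarded by
// __kmpc_serialized_parallel / __kmpc_end_serialized_parallel.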
2808 void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
2809                                        llvm::Value *OutlinedFn,
2810                                        ArrayRef<llvm::Value *> CapturedVars,
2811                                        const Expr *IfCond) {
2812   if (!CGF.HaveInsertPoint())
2813     return;
2814   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2815   auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF,
2816                                                      PrePostActionTy &) {
2817     // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn);
2818     CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2819     llvm::Value *Args[] = {
2820         RTLoc,
2821         CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
2822         CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())};
2823     llvm::SmallVector<llvm::Value *, 16> RealArgs;
2824     RealArgs.append(std::begin(Args), std::end(Args));
2825     RealArgs.append(CapturedVars.begin(), CapturedVars.end());
2826 
2827     llvm::Value *RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call);
2828     CGF.EmitRuntimeCall(RTLFn, RealArgs);
2829   };
2830   auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF,
2831                                                           PrePostActionTy &) {
2832     CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
2833     llvm::Value *ThreadID = RT.getThreadID(CGF, Loc);
2834     // Build calls:
2835     // __kmpc_serialized_parallel(&Loc, GTid);
2836     llvm::Value *Args[] = {RTLoc, ThreadID};
2837     CGF.EmitRuntimeCall(
2838         RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args);
2839 
2840     // OutlinedFn(&GTid, &zero, CapturedStruct);
2841     Address ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc);
2842     Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2843                                                         /*Name*/ ".zero.addr");
2844     CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2845     llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2846     OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2847     OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2848     OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2849     RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2850 
2851     // __kmpc_end_serialized_parallel(&Loc, GTid);
2852     llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID};
2853     CGF.EmitRuntimeCall(
2854         RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel),
2855         EndArgs);
2856   };
2857   if (IfCond) {
2858     emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
2859   } else {
2860     RegionCodeGenTy ThenRCG(ThenGen);
2861     ThenRCG(CGF);
2862   }
2863 }
2864 
2865 // If we're inside an (outlined) parallel region, use the region info's
2866 // thread-ID variable (it is passed as the first argument of the outlined
2867 // function as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
2868 // region but in a regular serial code region, get the thread ID by calling
2869 // kmp_int32 __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a
2870 // temporary, and return the address of that temporary.
2871 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
2872                                              SourceLocation Loc) {
2873   if (auto *OMPRegionInfo =
2874           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
2875     if (OMPRegionInfo->getThreadIDVariable())
2876       return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
2877 
2878   llvm::Value *ThreadID = getThreadID(CGF, Loc);
2879   QualType Int32Ty =
2880       CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
2881   Address ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
2882   CGF.EmitStoreOfScalar(ThreadID,
2883                         CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty));
2884 
2885   return ThreadIDTemp;
2886 }
2887 
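// Get or create an internal global variable with the given type and name,
// asserting that an existing variable has the requested type.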
2888 llvm::Constant *
2889 CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
2890                                              const llvm::Twine &Name) {
2891   SmallString<256> Buffer;
2892   llvm::raw_svector_ostream Out(Buffer);
2893   Out << Name;
2894   StringRef RuntimeName = Out.str();
2895   auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
2896   if (Elem.second) {
2897     assert(Elem.second->getType()->getPointerElementType() == Ty &&
2898            "OMP internal variable has different type than requested");
2899     return &*Elem.second;
2900   }
2901 
2902   return Elem.second = new llvm::GlobalVariable(
2903              CGM.getModule(), Ty, /*IsConstant*/ false,
2904              llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
2905              Elem.first());
2906 }
2907 
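// Get or create the lock variable (of KmpCriticalNameTy type) used by
// __kmpc_critical / __kmpc_end_critical for the given critical section name.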
2908 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
2909   std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
2910   std::string Name = getName({Prefix, "var"});
2911   return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
2912 }
2913 
2914 namespace {
2915 /// Common pre(post)-action for different OpenMP constructs.
2916 class CommonActionTy final : public PrePostActionTy {
2917   llvm::Value *EnterCallee;
2918   ArrayRef<llvm::Value *> EnterArgs;
2919   llvm::Value *ExitCallee;
2920   ArrayRef<llvm::Value *> ExitArgs;
2921   bool Conditional;
2922   llvm::BasicBlock *ContBlock = nullptr;
2923 
2924 public:
2925   CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
2926                  llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
2927                  bool Conditional = false)
2928       : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
2929         ExitArgs(ExitArgs), Conditional(Conditional) {}
2930   void Enter(CodeGenFunction &CGF) override {
2931     llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
2932     if (Conditional) {
2933       llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
2934       auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
2935       ContBlock = CGF.createBasicBlock("omp_if.end");
2936       // Generate the branch (If-stmt)
2937       CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
2938       CGF.EmitBlock(ThenBlock);
2939     }
2940   }
2941   void Done(CodeGenFunction &CGF) {
2942     // Emit the rest of blocks/branches
2943     CGF.EmitBranch(ContBlock);
2944     CGF.EmitBlock(ContBlock, true);
2945   }
2946   void Exit(CodeGenFunction &CGF) override {
2947     CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
2948   }
2949 };
2950 } // anonymous namespace
2951 
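// Emit a 'critical' region: enter with __kmpc_critical (or
// __kmpc_critical_with_hint when a hint expression is present), run the
// region, and leave with __kmpc_end_critical.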
2952 void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
2953                                          StringRef CriticalName,
2954                                          const RegionCodeGenTy &CriticalOpGen,
2955                                          SourceLocation Loc, const Expr *Hint) {
2956   // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]);
2957   // CriticalOpGen();
2958   // __kmpc_end_critical(ident_t *, gtid, Lock);
2959   // Prepare arguments and build a call to __kmpc_critical
2960   if (!CGF.HaveInsertPoint())
2961     return;
2962   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
2963                          getCriticalRegionLock(CriticalName)};
2964   llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args),
2965                                                 std::end(Args));
2966   if (Hint) {
2967     EnterArgs.push_back(CGF.Builder.CreateIntCast(
2968         CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false));
2969   }
2970   CommonActionTy Action(
2971       createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint
2972                                  : OMPRTL__kmpc_critical),
2973       EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args);
2974   CriticalOpGen.setAction(Action);
2975   emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
2976 }
2977 
2978 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
2979                                        const RegionCodeGenTy &MasterOpGen,
2980                                        SourceLocation Loc) {
2981   if (!CGF.HaveInsertPoint())
2982     return;
2983   // if(__kmpc_master(ident_t *, gtid)) {
2984   //   MasterOpGen();
2985   //   __kmpc_end_master(ident_t *, gtid);
2986   // }
2987   // Prepare arguments and build a call to __kmpc_master
2988   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
2989   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args,
2990                         createRuntimeFunction(OMPRTL__kmpc_end_master), Args,
2991                         /*Conditional=*/true);
2992   MasterOpGen.setAction(Action);
2993   emitInlinedDirective(CGF, OMPD_master, MasterOpGen);
2994   Action.Done(CGF);
2995 }
2996 
2997 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
2998                                         SourceLocation Loc) {
2999   if (!CGF.HaveInsertPoint())
3000     return;
3001   // Build call __kmpc_omp_taskyield(loc, thread_id, 0);
3002   llvm::Value *Args[] = {
3003       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3004       llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
3005   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
3006   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
3007     Region->emitUntiedSwitch(CGF);
3008 }
3009 
3010 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
3011                                           const RegionCodeGenTy &TaskgroupOpGen,
3012                                           SourceLocation Loc) {
3013   if (!CGF.HaveInsertPoint())
3014     return;
3015   // __kmpc_taskgroup(ident_t *, gtid);
3016   // TaskgroupOpGen();
3017   // __kmpc_end_taskgroup(ident_t *, gtid);
3018   // Prepare arguments and build a call to __kmpc_taskgroup
3019   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3020   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args,
3021                         createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
3022                         Args);
3023   TaskgroupOpGen.setAction(Action);
3024   emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
3025 }
3026 
3027 /// Given an array of pointers to variables, project the address of a
3028 /// given variable.
3029 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
3030                                       unsigned Index, const VarDecl *Var) {
3031   // Pull out the pointer to the variable.
3032   Address PtrAddr =
3033       CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize());
3034   llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
3035 
3036   Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
3037   Addr = CGF.Builder.CreateElementBitCast(
3038       Addr, CGF.ConvertTypeForMem(Var->getType()));
3039   return Addr;
3040 }
3041 
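/// Emit the copy helper passed to __kmpc_copyprivate: it takes two arrays of
/// void pointers (destination and source variables) and performs the
/// 'copyprivate' assignment for each variable in turn.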
3042 static llvm::Value *emitCopyprivateCopyFunction(
3043     CodeGenModule &CGM, llvm::Type *ArgsType,
3044     ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
3045     ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
3046     SourceLocation Loc) {
3047   ASTContext &C = CGM.getContext();
3048   // void copy_func(void *LHSArg, void *RHSArg);
3049   FunctionArgList Args;
3050   ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3051                            ImplicitParamDecl::Other);
3052   ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
3053                            ImplicitParamDecl::Other);
3054   Args.push_back(&LHSArg);
3055   Args.push_back(&RHSArg);
3056   const auto &CGFI =
3057       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3058   std::string Name =
3059       CGM.getOpenMPRuntime().getName({"omp", "copyprivate", "copy_func"});
3060   auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3061                                     llvm::GlobalValue::InternalLinkage, Name,
3062                                     &CGM.getModule());
3063   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3064   Fn->setDoesNotRecurse();
3065   CodeGenFunction CGF(CGM);
3066   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3067   // Dest = (void*[n])(LHSArg);
3068   // Src = (void*[n])(RHSArg);
3069   Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3070       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
3071       ArgsType), CGF.getPointerAlign());
3072   Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3073       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
3074       ArgsType), CGF.getPointerAlign());
3075   // *(Type0*)Dst[0] = *(Type0*)Src[0];
3076   // *(Type1*)Dst[1] = *(Type1*)Src[1];
3077   // ...
3078   // *(Typen*)Dst[n] = *(Typen*)Src[n];
3079   for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
3080     const auto *DestVar =
3081         cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl());
3082     Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar);
3083 
3084     const auto *SrcVar =
3085         cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl());
3086     Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar);
3087 
3088     const auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
3089     QualType Type = VD->getType();
3090     CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
3091   }
3092   CGF.FinishFunction();
3093   return Fn;
3094 }
3095 
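// Emit a 'single' region guarded by __kmpc_single / __kmpc_end_single. When
// 'copyprivate' variables are present, the executing thread sets a did_it
// flag and the private values are broadcast to the other threads with
// __kmpc_copyprivate.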
3096 void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
3097                                        const RegionCodeGenTy &SingleOpGen,
3098                                        SourceLocation Loc,
3099                                        ArrayRef<const Expr *> CopyprivateVars,
3100                                        ArrayRef<const Expr *> SrcExprs,
3101                                        ArrayRef<const Expr *> DstExprs,
3102                                        ArrayRef<const Expr *> AssignmentOps) {
3103   if (!CGF.HaveInsertPoint())
3104     return;
3105   assert(CopyprivateVars.size() == SrcExprs.size() &&
3106          CopyprivateVars.size() == DstExprs.size() &&
3107          CopyprivateVars.size() == AssignmentOps.size());
3108   ASTContext &C = CGM.getContext();
3109   // int32 did_it = 0;
3110   // if(__kmpc_single(ident_t *, gtid)) {
3111   //   SingleOpGen();
3112   //   __kmpc_end_single(ident_t *, gtid);
3113   //   did_it = 1;
3114   // }
3115   // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3116   // <copy_func>, did_it);
3117 
3118   Address DidIt = Address::invalid();
3119   if (!CopyprivateVars.empty()) {
3120     // int32 did_it = 0;
3121     QualType KmpInt32Ty =
3122         C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
3123     DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
3124     CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt);
3125   }
3126   // Prepare arguments and build a call to __kmpc_single
3127   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3128   CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args,
3129                         createRuntimeFunction(OMPRTL__kmpc_end_single), Args,
3130                         /*Conditional=*/true);
3131   SingleOpGen.setAction(Action);
3132   emitInlinedDirective(CGF, OMPD_single, SingleOpGen);
3133   if (DidIt.isValid()) {
3134     // did_it = 1;
3135     CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt);
3136   }
3137   Action.Done(CGF);
3138   // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
3139   // <copy_func>, did_it);
3140   if (DidIt.isValid()) {
3141     llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
3142     QualType CopyprivateArrayTy =
3143         C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
3144                                /*IndexTypeQuals=*/0);
3145     // Create a list of all private variables for copyprivate.
3146     Address CopyprivateList =
3147         CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
3148     for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
3149       Address Elem = CGF.Builder.CreateConstArrayGEP(
3150           CopyprivateList, I, CGF.getPointerSize());
3151       CGF.Builder.CreateStore(
3152           CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
3153               CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
3154           Elem);
3155     }
3156     // Build the function that copies private values from the single region to
3157     // all other threads in the corresponding parallel region.
3158     llvm::Value *CpyFn = emitCopyprivateCopyFunction(
3159         CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
3160         CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
3161     llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
3162     Address CL =
3163       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
3164                                                       CGF.VoidPtrTy);
3165     llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
3166     llvm::Value *Args[] = {
3167         emitUpdateLocation(CGF, Loc), // ident_t *<loc>
3168         getThreadID(CGF, Loc),        // i32 <gtid>
3169         BufSize,                      // size_t <buf_size>
3170         CL.getPointer(),              // void *<copyprivate list>
3171         CpyFn,                        // void (*) (void *, void *) <copy_func>
3172         DidItVal                      // i32 did_it
3173     };
3174     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
3175   }
3176 }
3177 
3178 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
3179                                         const RegionCodeGenTy &OrderedOpGen,
3180                                         SourceLocation Loc, bool IsThreads) {
3181   if (!CGF.HaveInsertPoint())
3182     return;
3183   // __kmpc_ordered(ident_t *, gtid);
3184   // OrderedOpGen();
3185   // __kmpc_end_ordered(ident_t *, gtid);
3186   // Prepare arguments and build a call to __kmpc_ordered
3187   if (IsThreads) {
3188     llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3189     CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args,
3190                           createRuntimeFunction(OMPRTL__kmpc_end_ordered),
3191                           Args);
3192     OrderedOpGen.setAction(Action);
3193     emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3194     return;
3195   }
3196   emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
3197 }
3198 
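// Emit a barrier for the given directive kind. Inside cancellable regions a
// __kmpc_cancel_barrier call is emitted (and, when checks are requested, its
// result is used to exit the construct on cancellation); otherwise a plain
// __kmpc_barrier is emitted.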
3199 void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
3200                                       OpenMPDirectiveKind Kind, bool EmitChecks,
3201                                       bool ForceSimpleCall) {
3202   if (!CGF.HaveInsertPoint())
3203     return;
3204   // Select the barrier flags for the directive kind.
3206   unsigned Flags;
3207   if (Kind == OMPD_for)
3208     Flags = OMP_IDENT_BARRIER_IMPL_FOR;
3209   else if (Kind == OMPD_sections)
3210     Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
3211   else if (Kind == OMPD_single)
3212     Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
3213   else if (Kind == OMPD_barrier)
3214     Flags = OMP_IDENT_BARRIER_EXPL;
3215   else
3216     Flags = OMP_IDENT_BARRIER_IMPL;
3217   // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
3218   // thread_id);
3219   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
3220                          getThreadID(CGF, Loc)};
3221   if (auto *OMPRegionInfo =
3222           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
3223     if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
3224       llvm::Value *Result = CGF.EmitRuntimeCall(
3225           createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
3226       if (EmitChecks) {
3227         // if (__kmpc_cancel_barrier()) {
3228         //   exit from construct;
3229         // }
3230         llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
3231         llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
3232         llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
3233         CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
3234         CGF.EmitBlock(ExitBB);
3235         //   exit from construct;
3236         CodeGenFunction::JumpDest CancelDestination =
3237             CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
3238         CGF.EmitBranchThroughCleanup(CancelDestination);
3239         CGF.EmitBlock(ContBB, /*IsFinished=*/true);
3240       }
3241       return;
3242     }
3243   }
3244   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
3245 }
3246 
3247 /// Map the OpenMP loop schedule to the runtime enumeration.
3248 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
3249                                           bool Chunked, bool Ordered) {
3250   switch (ScheduleKind) {
3251   case OMPC_SCHEDULE_static:
3252     return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
3253                    : (Ordered ? OMP_ord_static : OMP_sch_static);
3254   case OMPC_SCHEDULE_dynamic:
3255     return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
3256   case OMPC_SCHEDULE_guided:
3257     return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
3258   case OMPC_SCHEDULE_runtime:
3259     return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
3260   case OMPC_SCHEDULE_auto:
3261     return Ordered ? OMP_ord_auto : OMP_sch_auto;
3262   case OMPC_SCHEDULE_unknown:
3263     assert(!Chunked && "chunk was specified but schedule kind not known");
3264     return Ordered ? OMP_ord_static : OMP_sch_static;
3265   }
3266   llvm_unreachable("Unexpected runtime schedule");
3267 }
3268 
3269 /// Map the OpenMP distribute schedule to the runtime enumeration.
3270 static OpenMPSchedType
3271 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) {
3272   // Only the 'static' kind is allowed for dist_schedule.
3273   return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
3274 }
3275 
3276 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
3277                                          bool Chunked) const {
3278   OpenMPSchedType Schedule =
3279       getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
3280   return Schedule == OMP_sch_static;
3281 }
3282 
3283 bool CGOpenMPRuntime::isStaticNonchunked(
3284     OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
3285   OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
3286   return Schedule == OMP_dist_sch_static;
3287 }
3288 
3290 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
3291   OpenMPSchedType Schedule =
3292       getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
3293   assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
3294   return Schedule != OMP_sch_static;
3295 }
3296 
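/// Combine the runtime schedule kind with the monotonic/nonmonotonic/simd
/// schedule modifiers into the value expected by the runtime. The 'simd'
/// modifier upgrades a static chunked schedule to its balanced variant.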
3297 static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
3298                                   OpenMPScheduleClauseModifier M1,
3299                                   OpenMPScheduleClauseModifier M2) {
3300   int Modifier = 0;
3301   switch (M1) {
3302   case OMPC_SCHEDULE_MODIFIER_monotonic:
3303     Modifier = OMP_sch_modifier_monotonic;
3304     break;
3305   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3306     Modifier = OMP_sch_modifier_nonmonotonic;
3307     break;
3308   case OMPC_SCHEDULE_MODIFIER_simd:
3309     if (Schedule == OMP_sch_static_chunked)
3310       Schedule = OMP_sch_static_balanced_chunked;
3311     break;
3312   case OMPC_SCHEDULE_MODIFIER_last:
3313   case OMPC_SCHEDULE_MODIFIER_unknown:
3314     break;
3315   }
3316   switch (M2) {
3317   case OMPC_SCHEDULE_MODIFIER_monotonic:
3318     Modifier = OMP_sch_modifier_monotonic;
3319     break;
3320   case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
3321     Modifier = OMP_sch_modifier_nonmonotonic;
3322     break;
3323   case OMPC_SCHEDULE_MODIFIER_simd:
3324     if (Schedule == OMP_sch_static_chunked)
3325       Schedule = OMP_sch_static_balanced_chunked;
3326     break;
3327   case OMPC_SCHEDULE_MODIFIER_last:
3328   case OMPC_SCHEDULE_MODIFIER_unknown:
3329     break;
3330   }
3331   return Schedule | Modifier;
3332 }
3333 
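// Emit the __kmpc_dispatch_init_{4,4u,8,8u} call that starts a dynamically
// scheduled (or ordered) worksharing loop; the chunk defaults to 1 when the
// clause does not specify one.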
3334 void CGOpenMPRuntime::emitForDispatchInit(
3335     CodeGenFunction &CGF, SourceLocation Loc,
3336     const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
3337     bool Ordered, const DispatchRTInput &DispatchValues) {
3338   if (!CGF.HaveInsertPoint())
3339     return;
3340   OpenMPSchedType Schedule = getRuntimeSchedule(
3341       ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
3342   assert(Ordered ||
3343          (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
3344           Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
3345           Schedule != OMP_sch_static_balanced_chunked));
3346   // Call __kmpc_dispatch_init(
3347   //          ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
3348   //          kmp_int[32|64] lower, kmp_int[32|64] upper,
3349   //          kmp_int[32|64] stride, kmp_int[32|64] chunk);
3350 
3351   // If the Chunk was not specified in the clause - use default value 1.
3352   llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
3353                                             : CGF.Builder.getIntN(IVSize, 1);
3354   llvm::Value *Args[] = {
3355       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3356       CGF.Builder.getInt32(addMonoNonMonoModifier(
3357           Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
3358       DispatchValues.LB,                                // Lower
3359       DispatchValues.UB,                                // Upper
3360       CGF.Builder.getIntN(IVSize, 1),                   // Stride
3361       Chunk                                             // Chunk
3362   };
3363   CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
3364 }
3365 
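/// Emit the actual __kmpc_for_static_init_{4,4u,8,8u} call for a statically
/// scheduled worksharing or distribute loop; the chunk defaults to 1 for
/// non-chunked schedules.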
3366 static void emitForStaticInitCall(
3367     CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
3368     llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule,
3369     OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
3370     const CGOpenMPRuntime::StaticRTInput &Values) {
3371   if (!CGF.HaveInsertPoint())
3372     return;
3373 
3374   assert(!Values.Ordered);
3375   assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
3376          Schedule == OMP_sch_static_balanced_chunked ||
3377          Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
3378          Schedule == OMP_dist_sch_static ||
3379          Schedule == OMP_dist_sch_static_chunked);
3380 
3381   // Call __kmpc_for_static_init(
3382   //          ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
3383   //          kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
3384   //          kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
3385   //          kmp_int[32|64] incr, kmp_int[32|64] chunk);
3386   llvm::Value *Chunk = Values.Chunk;
3387   if (Chunk == nullptr) {
3388     assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
3389             Schedule == OMP_dist_sch_static) &&
3390            "expected static non-chunked schedule");
3391     // If the Chunk was not specified in the clause - use default value 1.
3392     Chunk = CGF.Builder.getIntN(Values.IVSize, 1);
3393   } else {
3394     assert((Schedule == OMP_sch_static_chunked ||
3395             Schedule == OMP_sch_static_balanced_chunked ||
3396             Schedule == OMP_ord_static_chunked ||
3397             Schedule == OMP_dist_sch_static_chunked) &&
3398            "expected static chunked schedule");
3399   }
3400   llvm::Value *Args[] = {
3401       UpdateLocation,
3402       ThreadId,
3403       CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1,
3404                                                   M2)), // Schedule type
3405       Values.IL.getPointer(),                           // &isLastIter
3406       Values.LB.getPointer(),                           // &LB
3407       Values.UB.getPointer(),                           // &UB
3408       Values.ST.getPointer(),                           // &Stride
3409       CGF.Builder.getIntN(Values.IVSize, 1),            // Incr
3410       Chunk                                             // Chunk
3411   };
3412   CGF.EmitRuntimeCall(ForStaticInitFunction, Args);
3413 }
3414 
3415 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
3416                                         SourceLocation Loc,
3417                                         OpenMPDirectiveKind DKind,
3418                                         const OpenMPScheduleTy &ScheduleKind,
3419                                         const StaticRTInput &Values) {
3420   OpenMPSchedType ScheduleNum = getRuntimeSchedule(
3421       ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
3422   assert(isOpenMPWorksharingDirective(DKind) &&
3423          "Expected loop-based or sections-based directive.");
3424   llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
3425                                              isOpenMPLoopDirective(DKind)
3426                                                  ? OMP_IDENT_WORK_LOOP
3427                                                  : OMP_IDENT_WORK_SECTIONS);
3428   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3429   llvm::Constant *StaticInitFunction =
3430       createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3431   emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3432                         ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
3433 }
3434 
3435 void CGOpenMPRuntime::emitDistributeStaticInit(
3436     CodeGenFunction &CGF, SourceLocation Loc,
3437     OpenMPDistScheduleClauseKind SchedKind,
3438     const CGOpenMPRuntime::StaticRTInput &Values) {
3439   OpenMPSchedType ScheduleNum =
3440       getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
3441   llvm::Value *UpdatedLocation =
3442       emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
3443   llvm::Value *ThreadId = getThreadID(CGF, Loc);
3444   llvm::Constant *StaticInitFunction =
3445       createForStaticInitFunction(Values.IVSize, Values.IVSigned);
3446   emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
3447                         ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
3448                         OMPC_SCHEDULE_MODIFIER_unknown, Values);
3449 }
3450 
3451 void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
3452                                           SourceLocation Loc,
3453                                           OpenMPDirectiveKind DKind) {
3454   if (!CGF.HaveInsertPoint())
3455     return;
3456   // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
3457   llvm::Value *Args[] = {
3458       emitUpdateLocation(CGF, Loc,
3459                          isOpenMPDistributeDirective(DKind)
3460                              ? OMP_IDENT_WORK_DISTRIBUTE
3461                              : isOpenMPLoopDirective(DKind)
3462                                    ? OMP_IDENT_WORK_LOOP
3463                                    : OMP_IDENT_WORK_SECTIONS),
3464       getThreadID(CGF, Loc)};
3465   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
3466                       Args);
3467 }
3468 
3469 void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
3470                                                  SourceLocation Loc,
3471                                                  unsigned IVSize,
3472                                                  bool IVSigned) {
3473   if (!CGF.HaveInsertPoint())
3474     return;
3475   // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
3476   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
3477   CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
3478 }
3479 
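// Emit a __kmpc_dispatch_next_{4,4u,8,8u} call and convert its kmp_int32
// result to a boolean that tells whether another chunk of iterations is
// available.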
3480 llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
3481                                           SourceLocation Loc, unsigned IVSize,
3482                                           bool IVSigned, Address IL,
3483                                           Address LB, Address UB,
3484                                           Address ST) {
3485   // Call __kmpc_dispatch_next(
3486   //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
3487   //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
3488   //          kmp_int[32|64] *p_stride);
3489   llvm::Value *Args[] = {
3490       emitUpdateLocation(CGF, Loc),
3491       getThreadID(CGF, Loc),
3492       IL.getPointer(), // &isLastIter
3493       LB.getPointer(), // &Lower
3494       UB.getPointer(), // &Upper
3495       ST.getPointer()  // &Stride
3496   };
3497   llvm::Value *Call =
3498       CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
3499   return CGF.EmitScalarConversion(
3500       Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
3501       CGF.getContext().BoolTy, Loc);
3502 }
3503 
3504 void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
3505                                            llvm::Value *NumThreads,
3506                                            SourceLocation Loc) {
3507   if (!CGF.HaveInsertPoint())
3508     return;
3509   // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
3510   llvm::Value *Args[] = {
3511       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3512       CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
3513   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
3514                       Args);
3515 }
3516 
3517 void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
3518                                          OpenMPProcBindClauseKind ProcBind,
3519                                          SourceLocation Loc) {
3520   if (!CGF.HaveInsertPoint())
3521     return;
3522   // Constants for the proc_bind values accepted by the runtime.
3523   enum ProcBindTy {
3524     ProcBindFalse = 0,
3525     ProcBindTrue,
3526     ProcBindMaster,
3527     ProcBindClose,
3528     ProcBindSpread,
3529     ProcBindIntel,
3530     ProcBindDefault
3531   } RuntimeProcBind;
3532   switch (ProcBind) {
3533   case OMPC_PROC_BIND_master:
3534     RuntimeProcBind = ProcBindMaster;
3535     break;
3536   case OMPC_PROC_BIND_close:
3537     RuntimeProcBind = ProcBindClose;
3538     break;
3539   case OMPC_PROC_BIND_spread:
3540     RuntimeProcBind = ProcBindSpread;
3541     break;
3542   case OMPC_PROC_BIND_unknown:
3543     llvm_unreachable("Unsupported proc_bind value.");
3544   }
3545   // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
3546   llvm::Value *Args[] = {
3547       emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
3548       llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
3549   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
3550 }
3551 
3552 void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
3553                                 SourceLocation Loc) {
3554   if (!CGF.HaveInsertPoint())
3555     return;
3556   // Build call void __kmpc_flush(ident_t *loc)
3557   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
3558                       emitUpdateLocation(CGF, Loc));
3559 }
3560 
3561 namespace {
3562 /// Indexes of fields for type kmp_task_t.
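/// The order of these indexes must match the field order used in
/// createKmpTaskTRecordDecl() below.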
3563 enum KmpTaskTFields {
3564   /// List of shared variables.
3565   KmpTaskTShareds,
3566   /// Task routine.
3567   KmpTaskTRoutine,
3568   /// Partition id for the untied tasks.
3569   KmpTaskTPartId,
3570   /// Function with call of destructors for private variables.
3571   Data1,
3572   /// Task priority.
3573   Data2,
3574   /// (Taskloops only) Lower bound.
3575   KmpTaskTLowerBound,
3576   /// (Taskloops only) Upper bound.
3577   KmpTaskTUpperBound,
3578   /// (Taskloops only) Stride.
3579   KmpTaskTStride,
3580   /// (Taskloops only) Is last iteration flag.
3581   KmpTaskTLastIter,
3582   /// (Taskloops only) Reduction data.
3583   KmpTaskTReductions,
3584 };
3585 } // anonymous namespace
3586 
3587 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
3588   return OffloadEntriesTargetRegion.empty() &&
3589          OffloadEntriesDeviceGlobalVar.empty();
3590 }
3591 
3592 /// Initialize target region entry.
3593 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3594     initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3595                                     StringRef ParentName, unsigned LineNum,
3596                                     unsigned Order) {
3597   assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3598                                              "only required for the device "
3599                                              "code generation.");
3600   OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
3601       OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
3602                                    OMPTargetRegionEntryTargetRegion);
3603   ++OffloadingEntriesNum;
3604 }
3605 
3606 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3607     registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
3608                                   StringRef ParentName, unsigned LineNum,
3609                                   llvm::Constant *Addr, llvm::Constant *ID,
3610                                   OMPTargetRegionEntryKind Flags) {
  // If we are emitting code for the device, the entry is already initialized;
  // it only has to be registered.
3613   if (CGM.getLangOpts().OpenMPIsDevice) {
3614     if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum)) {
3615       unsigned DiagID = CGM.getDiags().getCustomDiagID(
3616           DiagnosticsEngine::Error,
3617           "Unable to find target region on line '%0' in the device code.");
3618       CGM.getDiags().Report(DiagID) << LineNum;
3619       return;
3620     }
3621     auto &Entry =
3622         OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
3623     assert(Entry.isValid() && "Entry not initialized!");
3624     Entry.setAddress(Addr);
3625     Entry.setID(ID);
3626     Entry.setFlags(Flags);
3627   } else {
3628     OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
3629     OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
3630     ++OffloadingEntriesNum;
3631   }
3632 }
3633 
3634 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
3635     unsigned DeviceID, unsigned FileID, StringRef ParentName,
3636     unsigned LineNum) const {
3637   auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
3638   if (PerDevice == OffloadEntriesTargetRegion.end())
3639     return false;
3640   auto PerFile = PerDevice->second.find(FileID);
3641   if (PerFile == PerDevice->second.end())
3642     return false;
3643   auto PerParentName = PerFile->second.find(ParentName);
3644   if (PerParentName == PerFile->second.end())
3645     return false;
3646   auto PerLine = PerParentName->second.find(LineNum);
3647   if (PerLine == PerParentName->second.end())
3648     return false;
3649   // Fail if this entry is already registered.
3650   if (PerLine->second.getAddress() || PerLine->second.getID())
3651     return false;
3652   return true;
3653 }
3654 
3655 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
3656     const OffloadTargetRegionEntryInfoActTy &Action) {
3657   // Scan all target region entries and perform the provided action.
3658   for (const auto &D : OffloadEntriesTargetRegion)
3659     for (const auto &F : D.second)
3660       for (const auto &P : F.second)
3661         for (const auto &L : P.second)
3662           Action(D.first, F.first, P.first(), L.first, L.second);
3663 }
3664 
3665 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3666     initializeDeviceGlobalVarEntryInfo(StringRef Name,
3667                                        OMPTargetGlobalVarEntryKind Flags,
3668                                        unsigned Order) {
3669   assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
3670                                              "only required for the device "
3671                                              "code generation.");
3672   OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
3673   ++OffloadingEntriesNum;
3674 }
3675 
3676 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3677     registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
3678                                      CharUnits VarSize,
3679                                      OMPTargetGlobalVarEntryKind Flags,
3680                                      llvm::GlobalValue::LinkageTypes Linkage) {
3681   if (CGM.getLangOpts().OpenMPIsDevice) {
3682     auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
3683     assert(Entry.isValid() && Entry.getFlags() == Flags &&
3684            "Entry not initialized!");
3685     assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
3686            "Resetting with the new address.");
3687     if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName))
3688       return;
3689     Entry.setAddress(Addr);
3690     Entry.setVarSize(VarSize);
3691     Entry.setLinkage(Linkage);
3692   } else {
3693     if (hasDeviceGlobalVarEntryInfo(VarName))
3694       return;
3695     OffloadEntriesDeviceGlobalVar.try_emplace(
3696         VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
3697     ++OffloadingEntriesNum;
3698   }
3699 }
3700 
3701 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
3702     actOnDeviceGlobalVarEntriesInfo(
3703         const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
  // Scan all device global variable entries and perform the provided action.
3705   for (const auto &E : OffloadEntriesDeviceGlobalVar)
3706     Action(E.getKey(), E.getValue());
3707 }
3708 
3709 llvm::Function *
3710 CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
3711   // If we don't have entries or if we are emitting code for the device, we
3712   // don't need to do anything.
3713   if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
3714     return nullptr;
3715 
3716   llvm::Module &M = CGM.getModule();
3717   ASTContext &C = CGM.getContext();
3718 
3719   // Get list of devices we care about
3720   const std::vector<llvm::Triple> &Devices = CGM.getLangOpts().OMPTargetTriples;
3721 
3722   // We should be creating an offloading descriptor only if there are devices
3723   // specified.
3724   assert(!Devices.empty() && "No OpenMP offloading devices??");
3725 
3726   // Create the external variables that will point to the begin and end of the
3727   // host entries section. These will be defined by the linker.
3728   llvm::Type *OffloadEntryTy =
3729       CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
3730   std::string EntriesBeginName = getName({"omp_offloading", "entries_begin"});
3731   auto *HostEntriesBegin = new llvm::GlobalVariable(
3732       M, OffloadEntryTy, /*isConstant=*/true,
3733       llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
3734       EntriesBeginName);
3735   std::string EntriesEndName = getName({"omp_offloading", "entries_end"});
3736   auto *HostEntriesEnd =
3737       new llvm::GlobalVariable(M, OffloadEntryTy, /*isConstant=*/true,
3738                                llvm::GlobalValue::ExternalLinkage,
3739                                /*Initializer=*/nullptr, EntriesEndName);
3740 
3741   // Create all device images
3742   auto *DeviceImageTy = cast<llvm::StructType>(
3743       CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
3744   ConstantInitBuilder DeviceImagesBuilder(CGM);
3745   ConstantArrayBuilder DeviceImagesEntries =
3746       DeviceImagesBuilder.beginArray(DeviceImageTy);
3747 
3748   for (const llvm::Triple &Device : Devices) {
3749     StringRef T = Device.getTriple();
3750     std::string BeginName = getName({"omp_offloading", "img_start", ""});
3751     auto *ImgBegin = new llvm::GlobalVariable(
3752         M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3753         /*Initializer=*/nullptr, Twine(BeginName).concat(T));
3754     std::string EndName = getName({"omp_offloading", "img_end", ""});
3755     auto *ImgEnd = new llvm::GlobalVariable(
3756         M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
3757         /*Initializer=*/nullptr, Twine(EndName).concat(T));
3758 
3759     llvm::Constant *Data[] = {ImgBegin, ImgEnd, HostEntriesBegin,
3760                               HostEntriesEnd};
3761     createConstantGlobalStructAndAddToParent(CGM, getTgtDeviceImageQTy(), Data,
3762                                              DeviceImagesEntries);
3763   }
3764 
3765   // Create device images global array.
3766   std::string ImagesName = getName({"omp_offloading", "device_images"});
3767   llvm::GlobalVariable *DeviceImages =
3768       DeviceImagesEntries.finishAndCreateGlobal(ImagesName,
3769                                                 CGM.getPointerAlign(),
3770                                                 /*isConstant=*/true);
3771   DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3772 
  // Index of zeros used to build the constant GEP expression that points to
  // the first element of the device images array.
3774   llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
3775                              llvm::Constant::getNullValue(CGM.Int32Ty)};
3776 
3777   // Create the target region descriptor.
3778   llvm::Constant *Data[] = {
3779       llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
3780       llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
3781                                            DeviceImages, Index),
3782       HostEntriesBegin, HostEntriesEnd};
3783   std::string Descriptor = getName({"omp_offloading", "descriptor"});
3784   llvm::GlobalVariable *Desc = createConstantGlobalStruct(
3785       CGM, getTgtBinaryDescriptorQTy(), Data, Descriptor);
3786 
  // Emit code to register or unregister the descriptor at program startup or
  // shutdown, respectively.
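  //
  // Conceptually, the two emitted functions look like this (illustrative
  // sketch only; the actual symbol names come from getName() and the
  // unregister call is scheduled through the C++ ABI's global destructor
  // mechanism, e.g. __cxa_atexit on Itanium-like targets):
  //
  //   void .omp_offloading.descriptor_unreg(void *) {
  //     __tgt_unregister_lib(&.omp_offloading.descriptor);
  //   }
  //   void .omp_offloading.descriptor_reg() {
  //     __tgt_register_lib(&.omp_offloading.descriptor);
  //     <register .omp_offloading.descriptor_unreg to run at exit>;
  //   }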
3789 
3790   llvm::Function *UnRegFn;
3791   {
3792     FunctionArgList Args;
3793     ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
3794     Args.push_back(&DummyPtr);
3795 
3796     CodeGenFunction CGF(CGM);
3797     // Disable debug info for global (de-)initializer because they are not part
3798     // of some particular construct.
3799     CGF.disableDebugInfo();
3800     const auto &FI =
3801         CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3802     llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
3803     std::string UnregName = getName({"omp_offloading", "descriptor_unreg"});
3804     UnRegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, UnregName, FI);
3805     CGF.StartFunction(GlobalDecl(), C.VoidTy, UnRegFn, FI, Args);
3806     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
3807                         Desc);
3808     CGF.FinishFunction();
3809   }
3810   llvm::Function *RegFn;
3811   {
3812     CodeGenFunction CGF(CGM);
3813     // Disable debug info for global (de-)initializer because they are not part
3814     // of some particular construct.
3815     CGF.disableDebugInfo();
3816     const auto &FI = CGM.getTypes().arrangeNullaryFunction();
3817     llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
3818     std::string Descriptor = getName({"omp_offloading", "descriptor_reg"});
3819     RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
3820     CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
3821     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
3822     // Create a variable to drive the registration and unregistration of the
3823     // descriptor, so we can reuse the logic that emits Ctors and Dtors.
3824     ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(),
3825                                   SourceLocation(), nullptr, C.CharTy,
3826                                   ImplicitParamDecl::Other);
3827     CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
3828     CGF.FinishFunction();
3829   }
3830   if (CGM.supportsCOMDAT()) {
    // It is sufficient to call the registration function only once, so create
    // a COMDAT group for the registration/unregistration functions and the
    // associated data. That reduces startup time and code size. The
    // registration function serves as the COMDAT group key.
3835     llvm::Comdat *ComdatKey = M.getOrInsertComdat(RegFn->getName());
3836     RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
3837     RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
3838     RegFn->setComdat(ComdatKey);
3839     UnRegFn->setComdat(ComdatKey);
3840     DeviceImages->setComdat(ComdatKey);
3841     Desc->setComdat(ComdatKey);
3842   }
3843   return RegFn;
3844 }
3845 
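/// Create a single __tgt_offload_entry global together with the string holding
/// its name. For an entry named "foo" this produces IR roughly like the
/// following (illustrative only; the exact symbol prefixes come from getName()
/// and the integer widths from the target):
/// \code
///   @.omp_offloading.entry_name = internal unnamed_addr constant [4 x i8] c"foo\00"
///   @.omp_offloading.entry.foo = weak constant %struct.__tgt_offload_entry {
///       i8* <ID>, i8* <name>, i64 <size>, i32 <flags>, i32 0 },
///       section ".omp_offloading.entries"
/// \endcode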
3846 void CGOpenMPRuntime::createOffloadEntry(
3847     llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
3848     llvm::GlobalValue::LinkageTypes Linkage) {
3849   StringRef Name = Addr->getName();
3850   llvm::Module &M = CGM.getModule();
3851   llvm::LLVMContext &C = M.getContext();
3852 
3853   // Create constant string with the name.
3854   llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
3855 
3856   std::string StringName = getName({"omp_offloading", "entry_name"});
3857   auto *Str = new llvm::GlobalVariable(
3858       M, StrPtrInit->getType(), /*isConstant=*/true,
3859       llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
3860   Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3861 
3862   llvm::Constant *Data[] = {llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy),
3863                             llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy),
3864                             llvm::ConstantInt::get(CGM.SizeTy, Size),
3865                             llvm::ConstantInt::get(CGM.Int32Ty, Flags),
3866                             llvm::ConstantInt::get(CGM.Int32Ty, 0)};
3867   std::string EntryName = getName({"omp_offloading", "entry", ""});
3868   llvm::GlobalVariable *Entry = createConstantGlobalStruct(
3869       CGM, getTgtOffloadEntryQTy(), Data, Twine(EntryName).concat(Name),
3870       llvm::GlobalValue::WeakAnyLinkage);
3871 
  // The entry has to be created in the section the linker expects it to be in.
3873   std::string Section = getName({"omp_offloading", "entries"});
3874   Entry->setSection(Section);
3875 }
3876 
3877 void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
3878   // Emit the offloading entries and metadata so that the device codegen side
3879   // can easily figure out what to emit. The produced metadata looks like
3880   // this:
3881   //
3882   // !omp_offload.info = !{!1, ...}
3883   //
  // Right now we only generate metadata for functions that contain target
  // regions.
3886 
3887   // If we do not have entries, we don't need to do anything.
3888   if (OffloadEntriesInfoManager.empty())
3889     return;
3890 
3891   llvm::Module &M = CGM.getModule();
3892   llvm::LLVMContext &C = M.getContext();
3893   SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
3894       OrderedEntries(OffloadEntriesInfoManager.size());
3895 
3896   // Auxiliary methods to create metadata values and strings.
3897   auto &&GetMDInt = [this](unsigned V) {
3898     return llvm::ConstantAsMetadata::get(
3899         llvm::ConstantInt::get(CGM.Int32Ty, V));
3900   };
3901 
3902   auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
3903 
3904   // Create the offloading info metadata node.
3905   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
3906 
  // Create a function that emits metadata for each target region entry.
3908   auto &&TargetRegionMetadataEmitter =
3909       [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
3910           unsigned DeviceID, unsigned FileID, StringRef ParentName,
3911           unsigned Line,
3912           const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
3913         // Generate metadata for target regions. Each entry of this metadata
3914         // contains:
3915         // - Entry 0 -> Kind of this type of metadata (0).
3916         // - Entry 1 -> Device ID of the file where the entry was identified.
3917         // - Entry 2 -> File ID of the file where the entry was identified.
3918         // - Entry 3 -> Mangled name of the function where the entry was
3919         // identified.
3920         // - Entry 4 -> Line in the file where the entry was identified.
3921         // - Entry 5 -> Order the entry was created.
3922         // The first element of the metadata node is the kind.
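        // For example (values illustrative), a target region in function
        // "foo" at line 42 yields:
        //   !{i32 0, i32 <DeviceID>, i32 <FileID>, !"foo", i32 42, i32 <Order>}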
3923         llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
3924                                  GetMDInt(FileID),      GetMDString(ParentName),
3925                                  GetMDInt(Line),        GetMDInt(E.getOrder())};
3926 
3927         // Save this entry in the right position of the ordered entries array.
3928         OrderedEntries[E.getOrder()] = &E;
3929 
3930         // Add metadata to the named metadata node.
3931         MD->addOperand(llvm::MDNode::get(C, Ops));
3932       };
3933 
3934   OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
3935       TargetRegionMetadataEmitter);
3936 
  // Create a function that emits metadata for each device global variable
  // entry.
3938   auto &&DeviceGlobalVarMetadataEmitter =
3939       [&C, &OrderedEntries, &GetMDInt, &GetMDString,
3940        MD](StringRef MangledName,
3941            const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
3942                &E) {
3943         // Generate metadata for global variables. Each entry of this metadata
3944         // contains:
3945         // - Entry 0 -> Kind of this type of metadata (1).
3946         // - Entry 1 -> Mangled name of the variable.
3947         // - Entry 2 -> Declare target kind.
3948         // - Entry 3 -> Order the entry was created.
3949         // The first element of the metadata node is the kind.
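        // For example (values illustrative), a declare target variable "bar"
        // yields:
        //   !{i32 1, !"bar", i32 <Flags>, i32 <Order>}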
3950         llvm::Metadata *Ops[] = {
3951             GetMDInt(E.getKind()), GetMDString(MangledName),
3952             GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
3953 
3954         // Save this entry in the right position of the ordered entries array.
3955         OrderedEntries[E.getOrder()] = &E;
3956 
3957         // Add metadata to the named metadata node.
3958         MD->addOperand(llvm::MDNode::get(C, Ops));
3959       };
3960 
3961   OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
3962       DeviceGlobalVarMetadataEmitter);
3963 
3964   for (const auto *E : OrderedEntries) {
3965     assert(E && "All ordered entries must exist!");
3966     if (const auto *CE =
3967             dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
3968                 E)) {
3969       if (!CE->getID() || !CE->getAddress()) {
3970         unsigned DiagID = CGM.getDiags().getCustomDiagID(
3971             DiagnosticsEngine::Error,
            "Offloading entry for target region is incorrect: either the "
3973             "address or the ID is invalid.");
3974         CGM.getDiags().Report(DiagID);
3975         continue;
3976       }
3977       createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
3978                          CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
3979     } else if (const auto *CE =
3980                    dyn_cast<OffloadEntriesInfoManagerTy::
3981                                 OffloadEntryInfoDeviceGlobalVar>(E)) {
3982       if (!CE->getAddress()) {
3983         unsigned DiagID = CGM.getDiags().getCustomDiagID(
3984             DiagnosticsEngine::Error,
            "Offloading entry for declare target variable is incorrect: the "
3986             "address is invalid.");
3987         CGM.getDiags().Report(DiagID);
3988         continue;
3989       }
3990       createOffloadEntry(CE->getAddress(), CE->getAddress(),
3991                          CE->getVarSize().getQuantity(), CE->getFlags(),
3992                          CE->getLinkage());
3993     } else {
3994       llvm_unreachable("Unsupported entry kind.");
3995     }
3996   }
3997 }
3998 
3999 /// Loads all the offload entries information from the host IR
4000 /// metadata.
4001 void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  // If we are in target mode, load the metadata from the host IR. This code
  // has to match the metadata creation in createOffloadEntriesAndInfoMetadata().
4004 
4005   if (!CGM.getLangOpts().OpenMPIsDevice)
4006     return;
4007 
4008   if (CGM.getLangOpts().OMPHostIRFile.empty())
4009     return;
4010 
4011   auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
4012   if (auto EC = Buf.getError()) {
4013     CGM.getDiags().Report(diag::err_cannot_open_file)
4014         << CGM.getLangOpts().OMPHostIRFile << EC.message();
4015     return;
4016   }
4017 
4018   llvm::LLVMContext C;
4019   auto ME = expectedToErrorOrAndEmitErrors(
4020       C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
4021 
4022   if (auto EC = ME.getError()) {
4023     unsigned DiagID = CGM.getDiags().getCustomDiagID(
4024         DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
4025     CGM.getDiags().Report(DiagID)
4026         << CGM.getLangOpts().OMPHostIRFile << EC.message();
4027     return;
4028   }
4029 
4030   llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
4031   if (!MD)
4032     return;
4033 
4034   for (llvm::MDNode *MN : MD->operands()) {
4035     auto &&GetMDInt = [MN](unsigned Idx) {
4036       auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
4037       return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
4038     };
4039 
4040     auto &&GetMDString = [MN](unsigned Idx) {
4041       auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
4042       return V->getString();
4043     };
4044 
4045     switch (GetMDInt(0)) {
4046     default:
4047       llvm_unreachable("Unexpected metadata!");
4048       break;
4049     case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
4050         OffloadingEntryInfoTargetRegion:
4051       OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
4052           /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
4053           /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
4054           /*Order=*/GetMDInt(5));
4055       break;
4056     case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
4057         OffloadingEntryInfoDeviceGlobalVar:
4058       OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
4059           /*MangledName=*/GetMDString(1),
4060           static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
4061               /*Flags=*/GetMDInt(2)),
4062           /*Order=*/GetMDInt(3));
4063       break;
4064     }
4065   }
4066 }
4067 
4068 void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
4069   if (!KmpRoutineEntryPtrTy) {
4070     // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
4071     ASTContext &C = CGM.getContext();
4072     QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
4073     FunctionProtoType::ExtProtoInfo EPI;
4074     KmpRoutineEntryPtrQTy = C.getPointerType(
4075         C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
4076     KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
4077   }
4078 }
4079 
4080 QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
4081   // Make sure the type of the entry is already created. This is the type we
4082   // have to create:
4083   // struct __tgt_offload_entry{
4084   //   void      *addr;       // Pointer to the offload entry info.
4085   //                          // (function or global)
4086   //   char      *name;       // Name of the function or global.
  //   size_t     size;       // Size of the entry info (0 if it is a function).
4088   //   int32_t    flags;      // Flags associated with the entry, e.g. 'link'.
4089   //   int32_t    reserved;   // Reserved, to use by the runtime library.
4090   // };
4091   if (TgtOffloadEntryQTy.isNull()) {
4092     ASTContext &C = CGM.getContext();
4093     RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
4094     RD->startDefinition();
4095     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4096     addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
4097     addFieldToRecordDecl(C, RD, C.getSizeType());
4098     addFieldToRecordDecl(
4099         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4100     addFieldToRecordDecl(
4101         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4102     RD->completeDefinition();
4103     RD->addAttr(PackedAttr::CreateImplicit(C));
4104     TgtOffloadEntryQTy = C.getRecordType(RD);
4105   }
4106   return TgtOffloadEntryQTy;
4107 }
4108 
4109 QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
4110   // These are the types we need to build:
4111   // struct __tgt_device_image{
4112   // void   *ImageStart;       // Pointer to the target code start.
4113   // void   *ImageEnd;         // Pointer to the target code end.
4114   // // We also add the host entries to the device image, as it may be useful
4115   // // for the target runtime to have access to that information.
4116   // __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all
4117   //                                       // the entries.
4118   // __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
4119   //                                       // entries (non inclusive).
4120   // };
4121   if (TgtDeviceImageQTy.isNull()) {
4122     ASTContext &C = CGM.getContext();
4123     RecordDecl *RD = C.buildImplicitRecord("__tgt_device_image");
4124     RD->startDefinition();
4125     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4126     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4127     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4128     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4129     RD->completeDefinition();
4130     TgtDeviceImageQTy = C.getRecordType(RD);
4131   }
4132   return TgtDeviceImageQTy;
4133 }
4134 
4135 QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
4136   // struct __tgt_bin_desc{
4137   //   int32_t              NumDevices;      // Number of devices supported.
4138   //   __tgt_device_image   *DeviceImages;   // Arrays of device images
4139   //                                         // (one per device).
4140   //   __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all the
4141   //                                         // entries.
4142   //   __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
4143   //                                         // entries (non inclusive).
4144   // };
4145   if (TgtBinaryDescriptorQTy.isNull()) {
4146     ASTContext &C = CGM.getContext();
4147     RecordDecl *RD = C.buildImplicitRecord("__tgt_bin_desc");
4148     RD->startDefinition();
4149     addFieldToRecordDecl(
4150         C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
4151     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
4152     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4153     addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
4154     RD->completeDefinition();
4155     TgtBinaryDescriptorQTy = C.getRecordType(RD);
4156   }
4157   return TgtBinaryDescriptorQTy;
4158 }
4159 
4160 namespace {
4161 struct PrivateHelpersTy {
4162   PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
4163                    const VarDecl *PrivateElemInit)
4164       : Original(Original), PrivateCopy(PrivateCopy),
4165         PrivateElemInit(PrivateElemInit) {}
4166   const VarDecl *Original;
4167   const VarDecl *PrivateCopy;
4168   const VarDecl *PrivateElemInit;
4169 };
4170 typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
4171 } // anonymous namespace
4172 
4173 static RecordDecl *
4174 createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
4175   if (!Privates.empty()) {
4176     ASTContext &C = CGM.getContext();
4177     // Build struct .kmp_privates_t. {
4178     //         /*  private vars  */
4179     //       };
4180     RecordDecl *RD = C.buildImplicitRecord(".kmp_privates.t");
4181     RD->startDefinition();
4182     for (const auto &Pair : Privates) {
4183       const VarDecl *VD = Pair.second.Original;
4184       QualType Type = VD->getType().getNonReferenceType();
4185       FieldDecl *FD = addFieldToRecordDecl(C, RD, Type);
4186       if (VD->hasAttrs()) {
4187         for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
4188              E(VD->getAttrs().end());
4189              I != E; ++I)
4190           FD->addAttr(*I);
4191       }
4192     }
4193     RD->completeDefinition();
4194     return RD;
4195   }
4196   return nullptr;
4197 }
4198 
4199 static RecordDecl *
4200 createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
4201                          QualType KmpInt32Ty,
4202                          QualType KmpRoutineEntryPointerQTy) {
4203   ASTContext &C = CGM.getContext();
4204   // Build struct kmp_task_t {
4205   //         void *              shareds;
4206   //         kmp_routine_entry_t routine;
4207   //         kmp_int32           part_id;
4208   //         kmp_cmplrdata_t data1;
4209   //         kmp_cmplrdata_t data2;
4210   // For taskloops additional fields:
4211   //         kmp_uint64          lb;
4212   //         kmp_uint64          ub;
4213   //         kmp_int64           st;
4214   //         kmp_int32           liter;
4215   //         void *              reductions;
4216   //       };
4217   RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
4218   UD->startDefinition();
4219   addFieldToRecordDecl(C, UD, KmpInt32Ty);
4220   addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
4221   UD->completeDefinition();
4222   QualType KmpCmplrdataTy = C.getRecordType(UD);
4223   RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
4224   RD->startDefinition();
4225   addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4226   addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
4227   addFieldToRecordDecl(C, RD, KmpInt32Ty);
4228   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4229   addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
4230   if (isOpenMPTaskLoopDirective(Kind)) {
4231     QualType KmpUInt64Ty =
4232         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
4233     QualType KmpInt64Ty =
4234         CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
4235     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4236     addFieldToRecordDecl(C, RD, KmpUInt64Ty);
4237     addFieldToRecordDecl(C, RD, KmpInt64Ty);
4238     addFieldToRecordDecl(C, RD, KmpInt32Ty);
4239     addFieldToRecordDecl(C, RD, C.VoidPtrTy);
4240   }
4241   RD->completeDefinition();
4242   return RD;
4243 }
4244 
4245 static RecordDecl *
4246 createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
4247                                      ArrayRef<PrivateDataTy> Privates) {
4248   ASTContext &C = CGM.getContext();
4249   // Build struct kmp_task_t_with_privates {
4250   //         kmp_task_t task_data;
4251   //         .kmp_privates_t. privates;
4252   //       };
4253   RecordDecl *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
4254   RD->startDefinition();
4255   addFieldToRecordDecl(C, RD, KmpTaskTQTy);
4256   if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
4257     addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
4258   RD->completeDefinition();
4259   return RD;
4260 }
4261 
4262 /// Emit a proxy function which accepts kmp_task_t as the second
4263 /// argument.
4264 /// \code
4265 /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
4266 ///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
4267 ///   For taskloops:
4268 ///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4269 ///   tt->reductions, tt->shareds);
4270 ///   return 0;
4271 /// }
4272 /// \endcode
4273 static llvm::Value *
4274 emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
4275                       OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
4276                       QualType KmpTaskTWithPrivatesPtrQTy,
4277                       QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
4278                       QualType SharedsPtrTy, llvm::Value *TaskFunction,
4279                       llvm::Value *TaskPrivatesMap) {
4280   ASTContext &C = CGM.getContext();
4281   FunctionArgList Args;
4282   ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4283                             ImplicitParamDecl::Other);
4284   ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4285                                 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4286                                 ImplicitParamDecl::Other);
4287   Args.push_back(&GtidArg);
4288   Args.push_back(&TaskTypeArg);
4289   const auto &TaskEntryFnInfo =
4290       CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4291   llvm::FunctionType *TaskEntryTy =
4292       CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
4293   std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_entry", ""});
4294   auto *TaskEntry = llvm::Function::Create(
4295       TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4296   CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskEntry, TaskEntryFnInfo);
4297   TaskEntry->setDoesNotRecurse();
4298   CodeGenFunction CGF(CGM);
4299   CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
4300                     Loc, Loc);
4301 
4302   // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
4303   // tt,
4304   // For taskloops:
4305   // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
4306   // tt->task_data.shareds);
4307   llvm::Value *GtidParam = CGF.EmitLoadOfScalar(
4308       CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
4309   LValue TDBase = CGF.EmitLoadOfPointerLValue(
4310       CGF.GetAddrOfLocalVar(&TaskTypeArg),
4311       KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4312   const auto *KmpTaskTWithPrivatesQTyRD =
4313       cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4314   LValue Base =
4315       CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4316   const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4317   auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
4318   LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
4319   llvm::Value *PartidParam = PartIdLVal.getPointer();
4320 
4321   auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
4322   LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
4323   llvm::Value *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4324       CGF.EmitLoadOfScalar(SharedsLVal, Loc),
4325       CGF.ConvertTypeForMem(SharedsPtrTy));
4326 
4327   auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4328   llvm::Value *PrivatesParam;
4329   if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
4330     LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
4331     PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4332         PrivatesLVal.getPointer(), CGF.VoidPtrTy);
4333   } else {
4334     PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
4335   }
4336 
4337   llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
4338                                TaskPrivatesMap,
4339                                CGF.Builder
4340                                    .CreatePointerBitCastOrAddrSpaceCast(
4341                                        TDBase.getAddress(), CGF.VoidPtrTy)
4342                                    .getPointer()};
4343   SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
4344                                           std::end(CommonArgs));
4345   if (isOpenMPTaskLoopDirective(Kind)) {
4346     auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
4347     LValue LBLVal = CGF.EmitLValueForField(Base, *LBFI);
4348     llvm::Value *LBParam = CGF.EmitLoadOfScalar(LBLVal, Loc);
4349     auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
4350     LValue UBLVal = CGF.EmitLValueForField(Base, *UBFI);
4351     llvm::Value *UBParam = CGF.EmitLoadOfScalar(UBLVal, Loc);
4352     auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
4353     LValue StLVal = CGF.EmitLValueForField(Base, *StFI);
4354     llvm::Value *StParam = CGF.EmitLoadOfScalar(StLVal, Loc);
4355     auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4356     LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4357     llvm::Value *LIParam = CGF.EmitLoadOfScalar(LILVal, Loc);
4358     auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
4359     LValue RLVal = CGF.EmitLValueForField(Base, *RFI);
4360     llvm::Value *RParam = CGF.EmitLoadOfScalar(RLVal, Loc);
4361     CallArgs.push_back(LBParam);
4362     CallArgs.push_back(UBParam);
4363     CallArgs.push_back(StParam);
4364     CallArgs.push_back(LIParam);
4365     CallArgs.push_back(RParam);
4366   }
4367   CallArgs.push_back(SharedsParam);
4368 
4369   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
4370                                                   CallArgs);
4371   CGF.EmitStoreThroughLValue(RValue::get(CGF.Builder.getInt32(/*C=*/0)),
4372                              CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
4373   CGF.FinishFunction();
4374   return TaskEntry;
4375 }
4376 
4377 static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
4378                                             SourceLocation Loc,
4379                                             QualType KmpInt32Ty,
4380                                             QualType KmpTaskTWithPrivatesPtrQTy,
4381                                             QualType KmpTaskTWithPrivatesQTy) {
4382   ASTContext &C = CGM.getContext();
4383   FunctionArgList Args;
4384   ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
4385                             ImplicitParamDecl::Other);
4386   ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4387                                 KmpTaskTWithPrivatesPtrQTy.withRestrict(),
4388                                 ImplicitParamDecl::Other);
4389   Args.push_back(&GtidArg);
4390   Args.push_back(&TaskTypeArg);
4391   const auto &DestructorFnInfo =
4392       CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
4393   llvm::FunctionType *DestructorFnTy =
4394       CGM.getTypes().GetFunctionType(DestructorFnInfo);
4395   std::string Name =
4396       CGM.getOpenMPRuntime().getName({"omp_task_destructor", ""});
4397   auto *DestructorFn =
4398       llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
4399                              Name, &CGM.getModule());
4400   CGM.SetInternalFunctionAttributes(GlobalDecl(), DestructorFn,
4401                                     DestructorFnInfo);
4402   DestructorFn->setDoesNotRecurse();
4403   CodeGenFunction CGF(CGM);
4404   CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
4405                     Args, Loc, Loc);
4406 
4407   LValue Base = CGF.EmitLoadOfPointerLValue(
4408       CGF.GetAddrOfLocalVar(&TaskTypeArg),
4409       KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4410   const auto *KmpTaskTWithPrivatesQTyRD =
4411       cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
4412   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4413   Base = CGF.EmitLValueForField(Base, *FI);
4414   for (const auto *Field :
4415        cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
4416     if (QualType::DestructionKind DtorKind =
4417             Field->getType().isDestructedType()) {
4418       LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
4419       CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
4420     }
4421   }
4422   CGF.FinishFunction();
4423   return DestructorFn;
4424 }
4425 
4426 /// Emit a privates mapping function for correct handling of private and
4427 /// firstprivate variables.
4428 /// \code
4429 /// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
4430 /// **noalias priv1,...,  <tyn> **noalias privn) {
4431 ///   *priv1 = &.privates.priv1;
4432 ///   ...;
4433 ///   *privn = &.privates.privn;
4434 /// }
4435 /// \endcode
4436 static llvm::Value *
4437 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
4438                                ArrayRef<const Expr *> PrivateVars,
4439                                ArrayRef<const Expr *> FirstprivateVars,
4440                                ArrayRef<const Expr *> LastprivateVars,
4441                                QualType PrivatesQTy,
4442                                ArrayRef<PrivateDataTy> Privates) {
4443   ASTContext &C = CGM.getContext();
4444   FunctionArgList Args;
4445   ImplicitParamDecl TaskPrivatesArg(
4446       C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4447       C.getPointerType(PrivatesQTy).withConst().withRestrict(),
4448       ImplicitParamDecl::Other);
4449   Args.push_back(&TaskPrivatesArg);
4450   llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
4451   unsigned Counter = 1;
4452   for (const Expr *E : PrivateVars) {
4453     Args.push_back(ImplicitParamDecl::Create(
4454         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4455         C.getPointerType(C.getPointerType(E->getType()))
4456             .withConst()
4457             .withRestrict(),
4458         ImplicitParamDecl::Other));
4459     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4460     PrivateVarsPos[VD] = Counter;
4461     ++Counter;
4462   }
4463   for (const Expr *E : FirstprivateVars) {
4464     Args.push_back(ImplicitParamDecl::Create(
4465         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4466         C.getPointerType(C.getPointerType(E->getType()))
4467             .withConst()
4468             .withRestrict(),
4469         ImplicitParamDecl::Other));
4470     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4471     PrivateVarsPos[VD] = Counter;
4472     ++Counter;
4473   }
4474   for (const Expr *E : LastprivateVars) {
4475     Args.push_back(ImplicitParamDecl::Create(
4476         C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4477         C.getPointerType(C.getPointerType(E->getType()))
4478             .withConst()
4479             .withRestrict(),
4480         ImplicitParamDecl::Other));
4481     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4482     PrivateVarsPos[VD] = Counter;
4483     ++Counter;
4484   }
4485   const auto &TaskPrivatesMapFnInfo =
4486       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4487   llvm::FunctionType *TaskPrivatesMapTy =
4488       CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
4489   std::string Name =
4490       CGM.getOpenMPRuntime().getName({"omp_task_privates_map", ""});
4491   auto *TaskPrivatesMap = llvm::Function::Create(
4492       TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
4493       &CGM.getModule());
4494   CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskPrivatesMap,
4495                                     TaskPrivatesMapFnInfo);
4496   TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
4497   TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
4498   TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
4499   CodeGenFunction CGF(CGM);
4500   CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
4501                     TaskPrivatesMapFnInfo, Args, Loc, Loc);
4502 
4503   // *privi = &.privates.privi;
4504   LValue Base = CGF.EmitLoadOfPointerLValue(
4505       CGF.GetAddrOfLocalVar(&TaskPrivatesArg),
4506       TaskPrivatesArg.getType()->castAs<PointerType>());
4507   const auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
4508   Counter = 0;
4509   for (const FieldDecl *Field : PrivatesQTyRD->fields()) {
4510     LValue FieldLVal = CGF.EmitLValueForField(Base, Field);
4511     const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
4512     LValue RefLVal =
4513         CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
4514     LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
4515         RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
4516     CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
4517     ++Counter;
4518   }
4519   CGF.FinishFunction();
4520   return TaskPrivatesMap;
4521 }
4522 
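/// Comparator used to sort task private variables by decreasing alignment, so
/// that the fields of the generated .kmp_privates.t record can be laid out
/// with little or no internal padding.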
4523 static bool stable_sort_comparator(const PrivateDataTy P1,
4524                                    const PrivateDataTy P2) {
4525   return P1.first > P2.first;
4526 }
4527 
4528 /// Emit initialization for private variables in task-based directives.
4529 static void emitPrivatesInit(CodeGenFunction &CGF,
4530                              const OMPExecutableDirective &D,
4531                              Address KmpTaskSharedsPtr, LValue TDBase,
4532                              const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4533                              QualType SharedsTy, QualType SharedsPtrTy,
4534                              const OMPTaskDataTy &Data,
4535                              ArrayRef<PrivateDataTy> Privates, bool ForDup) {
4536   ASTContext &C = CGF.getContext();
4537   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4538   LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
4539   OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
4540                                  ? OMPD_taskloop
4541                                  : OMPD_task;
4542   const CapturedStmt &CS = *D.getCapturedStmt(Kind);
4543   CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
4544   LValue SrcBase;
4545   bool IsTargetTask =
4546       isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
4547       isOpenMPTargetExecutionDirective(D.getDirectiveKind());
4548   // For target-based directives skip 3 firstprivate arrays BasePointersArray,
4549   // PointersArray and SizesArray. The original variables for these arrays are
4550   // not captured and we get their addresses explicitly.
4551   if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
4552       (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
4553     SrcBase = CGF.MakeAddrLValue(
4554         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4555             KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
4556         SharedsTy);
4557   }
4558   FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
4559   for (const PrivateDataTy &Pair : Privates) {
4560     const VarDecl *VD = Pair.second.PrivateCopy;
4561     const Expr *Init = VD->getAnyInitializer();
4562     if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
4563                              !CGF.isTrivialInitializer(Init)))) {
4564       LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
4565       if (const VarDecl *Elem = Pair.second.PrivateElemInit) {
4566         const VarDecl *OriginalVD = Pair.second.Original;
4567         // Check if the variable is the target-based BasePointersArray,
4568         // PointersArray or SizesArray.
4569         LValue SharedRefLValue;
4570         QualType Type = OriginalVD->getType();
4571         const FieldDecl *SharedField = CapturesInfo.lookup(OriginalVD);
4572         if (IsTargetTask && !SharedField) {
4573           assert(isa<ImplicitParamDecl>(OriginalVD) &&
4574                  isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
4575                  cast<CapturedDecl>(OriginalVD->getDeclContext())
4576                          ->getNumParams() == 0 &&
4577                  isa<TranslationUnitDecl>(
4578                      cast<CapturedDecl>(OriginalVD->getDeclContext())
4579                          ->getDeclContext()) &&
4580                  "Expected artificial target data variable.");
4581           SharedRefLValue =
4582               CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
4583         } else {
4584           SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
4585           SharedRefLValue = CGF.MakeAddrLValue(
4586               Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
4587               SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
4588               SharedRefLValue.getTBAAInfo());
4589         }
4590         if (Type->isArrayType()) {
4591           // Initialize firstprivate array.
4592           if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
4593             // Perform simple memcpy.
4594             CGF.EmitAggregateAssign(PrivateLValue, SharedRefLValue, Type);
4595           } else {
4596             // Initialize firstprivate array using element-by-element
4597             // initialization.
4598             CGF.EmitOMPAggregateAssign(
4599                 PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
4600                 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
4601                                                   Address SrcElement) {
4602                   // Clean up any temporaries needed by the initialization.
4603                   CodeGenFunction::OMPPrivateScope InitScope(CGF);
4604                   InitScope.addPrivate(
4605                       Elem, [SrcElement]() -> Address { return SrcElement; });
4606                   (void)InitScope.Privatize();
4607                   // Emit initialization for single element.
4608                   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
4609                       CGF, &CapturesInfo);
4610                   CGF.EmitAnyExprToMem(Init, DestElement,
4611                                        Init->getType().getQualifiers(),
4612                                        /*IsInitializer=*/false);
4613                 });
4614           }
4615         } else {
4616           CodeGenFunction::OMPPrivateScope InitScope(CGF);
4617           InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
4618             return SharedRefLValue.getAddress();
4619           });
4620           (void)InitScope.Privatize();
4621           CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
4622           CGF.EmitExprAsInit(Init, VD, PrivateLValue,
4623                              /*capturedByInit=*/false);
4624         }
4625       } else {
4626         CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
4627       }
4628     }
4629     ++FI;
4630   }
4631 }
4632 
/// Check if non-trivial initialization of private copies is required, in which
/// case a task duplication function is needed for taskloops.
4634 static bool checkInitIsRequired(CodeGenFunction &CGF,
4635                                 ArrayRef<PrivateDataTy> Privates) {
4636   bool InitRequired = false;
4637   for (const PrivateDataTy &Pair : Privates) {
4638     const VarDecl *VD = Pair.second.PrivateCopy;
4639     const Expr *Init = VD->getAnyInitializer();
4640     InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
4641                                     !CGF.isTrivialInitializer(Init));
4642     if (InitRequired)
4643       break;
4644   }
4645   return InitRequired;
4646 }
4647 
4649 /// Emit task_dup function (for initialization of
4650 /// private/firstprivate/lastprivate vars and last_iter flag)
4651 /// \code
4652 /// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
4653 /// lastpriv) {
4654 /// // setup lastprivate flag
4655 ///    task_dst->last = lastpriv;
4656 /// // could be constructor calls here...
4657 /// }
4658 /// \endcode
4659 static llvm::Value *
4660 emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
4661                     const OMPExecutableDirective &D,
4662                     QualType KmpTaskTWithPrivatesPtrQTy,
4663                     const RecordDecl *KmpTaskTWithPrivatesQTyRD,
4664                     const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
4665                     QualType SharedsPtrTy, const OMPTaskDataTy &Data,
4666                     ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
4667   ASTContext &C = CGM.getContext();
4668   FunctionArgList Args;
4669   ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4670                            KmpTaskTWithPrivatesPtrQTy,
4671                            ImplicitParamDecl::Other);
4672   ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4673                            KmpTaskTWithPrivatesPtrQTy,
4674                            ImplicitParamDecl::Other);
4675   ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
4676                                 ImplicitParamDecl::Other);
4677   Args.push_back(&DstArg);
4678   Args.push_back(&SrcArg);
4679   Args.push_back(&LastprivArg);
4680   const auto &TaskDupFnInfo =
4681       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4682   llvm::FunctionType *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
4683   std::string Name = CGM.getOpenMPRuntime().getName({"omp_task_dup", ""});
4684   auto *TaskDup = llvm::Function::Create(
4685       TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.getModule());
4686   CGM.SetInternalFunctionAttributes(GlobalDecl(), TaskDup, TaskDupFnInfo);
4687   TaskDup->setDoesNotRecurse();
4688   CodeGenFunction CGF(CGM);
4689   CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
4690                     Loc);
4691 
4692   LValue TDBase = CGF.EmitLoadOfPointerLValue(
4693       CGF.GetAddrOfLocalVar(&DstArg),
4694       KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4695   // task_dst->liter = lastpriv;
4696   if (WithLastIter) {
4697     auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
4698     LValue Base = CGF.EmitLValueForField(
4699         TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4700     LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
4701     llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
4702         CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
4703     CGF.EmitStoreOfScalar(Lastpriv, LILVal);
4704   }
4705 
4706   // Emit initial values for private copies (if any).
4707   assert(!Privates.empty());
4708   Address KmpTaskSharedsPtr = Address::invalid();
4709   if (!Data.FirstprivateVars.empty()) {
4710     LValue TDBase = CGF.EmitLoadOfPointerLValue(
4711         CGF.GetAddrOfLocalVar(&SrcArg),
4712         KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
4713     LValue Base = CGF.EmitLValueForField(
4714         TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
4715     KmpTaskSharedsPtr = Address(
4716         CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
4717                                  Base, *std::next(KmpTaskTQTyRD->field_begin(),
4718                                                   KmpTaskTShareds)),
4719                              Loc),
4720         CGF.getNaturalTypeAlignment(SharedsTy));
4721   }
4722   emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
4723                    SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
4724   CGF.FinishFunction();
4725   return TaskDup;
4726 }
4727 
4728 /// Checks if a destructor function needs to be generated.
4729 /// \return true if cleanups are required, false otherwise.
4730 static bool
4731 checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
4732   bool NeedsCleanup = false;
4733   auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
4734   const auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
4735   for (const FieldDecl *FD : PrivateRD->fields()) {
4736     NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
4737     if (NeedsCleanup)
4738       break;
4739   }
4740   return NeedsCleanup;
4741 }
4742 
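/// Allocates and initializes a kmp_task_t object for a task-generating
/// directive and returns the pieces needed by the callers (task object, proxy
/// entry, base lvalue, ...). A rough sketch of the code this emits (helper
/// names are illustrative only):
/// \code
/// kmp_task_t *new_task = __kmpc_omp_task_alloc(
///     loc, gtid, flags, sizeof(kmp_task_t_with_privates), sizeof(shareds),
///     &.omp_task_entry.);
/// // copy shareds into the task, run private/firstprivate initializers and,
/// // if required, record the destructors function and the priority.
/// \endcode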
4743 CGOpenMPRuntime::TaskResultTy
4744 CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
4745                               const OMPExecutableDirective &D,
4746                               llvm::Value *TaskFunction, QualType SharedsTy,
4747                               Address Shareds, const OMPTaskDataTy &Data) {
4748   ASTContext &C = CGM.getContext();
4749   llvm::SmallVector<PrivateDataTy, 4> Privates;
4750   // Aggregate privates and sort them by alignment.
4751   auto I = Data.PrivateCopies.begin();
4752   for (const Expr *E : Data.PrivateVars) {
4753     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4754     Privates.emplace_back(
4755         C.getDeclAlign(VD),
4756         PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4757                          /*PrivateElemInit=*/nullptr));
4758     ++I;
4759   }
4760   I = Data.FirstprivateCopies.begin();
4761   auto IElemInitRef = Data.FirstprivateInits.begin();
4762   for (const Expr *E : Data.FirstprivateVars) {
4763     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4764     Privates.emplace_back(
4765         C.getDeclAlign(VD),
4766         PrivateHelpersTy(
4767             VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4768             cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())));
4769     ++I;
4770     ++IElemInitRef;
4771   }
4772   I = Data.LastprivateCopies.begin();
4773   for (const Expr *E : Data.LastprivateVars) {
4774     const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4775     Privates.emplace_back(
4776         C.getDeclAlign(VD),
4777         PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
4778                          /*PrivateElemInit=*/nullptr));
4779     ++I;
4780   }
4781   std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
4782   QualType KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4783   // Build type kmp_routine_entry_t (if not built yet).
4784   emitKmpRoutineEntryT(KmpInt32Ty);
4785   // Build type kmp_task_t (if not built yet).
4786   if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
4787     if (SavedKmpTaskloopTQTy.isNull()) {
4788       SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4789           CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4790     }
4791     KmpTaskTQTy = SavedKmpTaskloopTQTy;
4792   } else {
4793     assert((D.getDirectiveKind() == OMPD_task ||
4794             isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
4795             isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
4796            "Expected taskloop, task or target directive");
4797     if (SavedKmpTaskTQTy.isNull()) {
4798       SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
4799           CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
4800     }
4801     KmpTaskTQTy = SavedKmpTaskTQTy;
4802   }
4803   const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
4804   // Build particular struct kmp_task_t for the given task.
4805   const RecordDecl *KmpTaskTWithPrivatesQTyRD =
4806       createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
4807   QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
4808   QualType KmpTaskTWithPrivatesPtrQTy =
4809       C.getPointerType(KmpTaskTWithPrivatesQTy);
4810   llvm::Type *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
4811   llvm::Type *KmpTaskTWithPrivatesPtrTy =
4812       KmpTaskTWithPrivatesTy->getPointerTo();
4813   llvm::Value *KmpTaskTWithPrivatesTySize =
4814       CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
4815   QualType SharedsPtrTy = C.getPointerType(SharedsTy);
4816 
4817   // Emit initial values for private copies (if any).
4818   llvm::Value *TaskPrivatesMap = nullptr;
4819   llvm::Type *TaskPrivatesMapTy =
4820       std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
4821   if (!Privates.empty()) {
4822     auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
4823     TaskPrivatesMap = emitTaskPrivateMappingFunction(
4824         CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
4825         FI->getType(), Privates);
4826     TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4827         TaskPrivatesMap, TaskPrivatesMapTy);
4828   } else {
4829     TaskPrivatesMap = llvm::ConstantPointerNull::get(
4830         cast<llvm::PointerType>(TaskPrivatesMapTy));
4831   }
4832   // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
4833   // kmp_task_t *tt);
4834   llvm::Value *TaskEntry = emitProxyTaskFunction(
4835       CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4836       KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
4837       TaskPrivatesMap);
4838 
4839   // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
4840   // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
4841   // kmp_routine_entry_t *task_entry);
4842   // Task flags. Format is taken from
4843   // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
4844   // description of kmp_tasking_flags struct.
4845   enum {
4846     TiedFlag = 0x1,
4847     FinalFlag = 0x2,
4848     DestructorsFlag = 0x8,
4849     PriorityFlag = 0x20
4850   };
4851   unsigned Flags = Data.Tied ? TiedFlag : 0;
4852   bool NeedsCleanup = false;
4853   if (!Privates.empty()) {
4854     NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
4855     if (NeedsCleanup)
4856       Flags = Flags | DestructorsFlag;
4857   }
4858   if (Data.Priority.getInt())
4859     Flags = Flags | PriorityFlag;
4860   llvm::Value *TaskFlags =
4861       Data.Final.getPointer()
4862           ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
4863                                      CGF.Builder.getInt32(FinalFlag),
4864                                      CGF.Builder.getInt32(/*C=*/0))
4865           : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
4866   TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
4867   llvm::Value *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
4868   llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
4869                               getThreadID(CGF, Loc), TaskFlags,
4870                               KmpTaskTWithPrivatesTySize, SharedsSize,
4871                               CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4872                                   TaskEntry, KmpRoutineEntryPtrTy)};
4873   llvm::Value *NewTask = CGF.EmitRuntimeCall(
4874       createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
4875   llvm::Value *NewTaskNewTaskTTy =
4876       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4877           NewTask, KmpTaskTWithPrivatesPtrTy);
4878   LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
4879                                                KmpTaskTWithPrivatesQTy);
4880   LValue TDBase =
4881       CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
4882   // Fill the data in the resulting kmp_task_t record.
4883   // Copy shareds if there are any.
4884   Address KmpTaskSharedsPtr = Address::invalid();
4885   if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
4886     KmpTaskSharedsPtr =
4887         Address(CGF.EmitLoadOfScalar(
4888                     CGF.EmitLValueForField(
4889                         TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
4890                                            KmpTaskTShareds)),
4891                     Loc),
4892                 CGF.getNaturalTypeAlignment(SharedsTy));
4893     LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
4894     LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
4895     CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
4896   }
4897   // Emit initial values for private copies (if any).
4898   TaskResultTy Result;
4899   if (!Privates.empty()) {
4900     emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
4901                      SharedsTy, SharedsPtrTy, Data, Privates,
4902                      /*ForDup=*/false);
4903     if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
4904         (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
4905       Result.TaskDupFn = emitTaskDupFunction(
4906           CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
4907           KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
4908           /*WithLastIter=*/!Data.LastprivateVars.empty());
4909     }
4910   }
4911   // Fields of union "kmp_cmplrdata_t" for destructors and priority.
4912   enum { Priority = 0, Destructors = 1 };
4913   // Provide pointer to function with destructors for privates.
4914   auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
4915   const RecordDecl *KmpCmplrdataUD =
4916       (*FI)->getType()->getAsUnionType()->getDecl();
4917   if (NeedsCleanup) {
4918     llvm::Value *DestructorFn = emitDestructorsFunction(
4919         CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
4920         KmpTaskTWithPrivatesQTy);
4921     LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
4922     LValue DestructorsLV = CGF.EmitLValueForField(
4923         Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4924     CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4925                               DestructorFn, KmpRoutineEntryPtrTy),
4926                           DestructorsLV);
4927   }
4928   // Set priority.
4929   if (Data.Priority.getInt()) {
4930     LValue Data2LV = CGF.EmitLValueForField(
4931         TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4932     LValue PriorityLV = CGF.EmitLValueForField(
4933         Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4934     CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
4935   }
4936   Result.NewTask = NewTask;
4937   Result.TaskEntry = TaskEntry;
4938   Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4939   Result.TDBase = TDBase;
4940   Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4941   return Result;
4942 }
4943 
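/// Emits the launch sequence for a '#pragma omp task' directive. Roughly (the
/// two branches correspond to ThenCodeGen and ElseCodeGen below; names are
/// illustrative only):
/// \code
/// if (<IfCond>) { // taken unconditionally when there is no 'if' clause
///   // with dependences: __kmpc_omp_task_with_deps(loc, gtid, new_task, ...);
///   __kmpc_omp_task(loc, gtid, new_task);
/// } else {
///   // with dependences: __kmpc_omp_wait_deps(loc, gtid, ndeps, deps, 0, 0);
///   __kmpc_omp_task_begin_if0(loc, gtid, new_task);
///   .omp_task_entry.(gtid, new_task); // execute the task body serially
///   __kmpc_omp_task_complete_if0(loc, gtid, new_task);
/// }
/// \endcode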
4944 void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
4945                                    const OMPExecutableDirective &D,
4946                                    llvm::Value *TaskFunction,
4947                                    QualType SharedsTy, Address Shareds,
4948                                    const Expr *IfCond,
4949                                    const OMPTaskDataTy &Data) {
4950   if (!CGF.HaveInsertPoint())
4951     return;
4952 
4953   TaskResultTy Result =
4954       emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
4955   llvm::Value *NewTask = Result.NewTask;
4956   llvm::Value *TaskEntry = Result.TaskEntry;
4957   llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
4958   LValue TDBase = Result.TDBase;
4959   const RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
4960   ASTContext &C = CGM.getContext();
4961   // Process list of dependences.
4962   Address DependenciesArray = Address::invalid();
4963   unsigned NumDependencies = Data.Dependences.size();
4964   if (NumDependencies) {
4965     // Dependence kind for RTL.
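    // These values are expected to match the kmp_depend_info flags layout in
    // the OpenMP runtime ('in' = 0x1, 'out' = 0x2): both 'out' and 'inout'
    // dependences are therefore emitted as in|out = 0x3.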
4966     enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
4967     enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
4968     RecordDecl *KmpDependInfoRD;
4969     QualType FlagsTy =
4970         C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
4971     llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
4972     if (KmpDependInfoTy.isNull()) {
4973       KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
4974       KmpDependInfoRD->startDefinition();
4975       addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
4976       addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
4977       addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
4978       KmpDependInfoRD->completeDefinition();
4979       KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
4980     } else {
4981       KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
4982     }
4983     CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
4984     // Define type kmp_depend_info[<Dependences.size()>];
4985     QualType KmpDependInfoArrayTy = C.getConstantArrayType(
4986         KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
4987         ArrayType::Normal, /*IndexTypeQuals=*/0);
4988     // kmp_depend_info[<Dependences.size()>] deps;
4989     DependenciesArray =
4990         CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
4991     for (unsigned I = 0; I < NumDependencies; ++I) {
4992       const Expr *E = Data.Dependences[I].second;
4993       LValue Addr = CGF.EmitLValue(E);
4994       llvm::Value *Size;
4995       QualType Ty = E->getType();
4996       if (const auto *ASE =
4997               dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
4998         LValue UpAddrLVal =
4999             CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
5000         llvm::Value *UpAddr =
5001             CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
5002         llvm::Value *LowIntPtr =
5003             CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
5004         llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
5005         Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
5006       } else {
5007         Size = CGF.getTypeSize(Ty);
5008       }
5009       LValue Base = CGF.MakeAddrLValue(
5010           CGF.Builder.CreateConstArrayGEP(DependenciesArray, I, DependencySize),
5011           KmpDependInfoTy);
5012       // deps[i].base_addr = &<Dependences[i].second>;
5013       LValue BaseAddrLVal = CGF.EmitLValueForField(
5014           Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
5015       CGF.EmitStoreOfScalar(
5016           CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
5017           BaseAddrLVal);
5018       // deps[i].len = sizeof(<Dependences[i].second>);
5019       LValue LenLVal = CGF.EmitLValueForField(
5020           Base, *std::next(KmpDependInfoRD->field_begin(), Len));
5021       CGF.EmitStoreOfScalar(Size, LenLVal);
5022       // deps[i].flags = <Dependences[i].first>;
5023       RTLDependenceKindTy DepKind;
5024       switch (Data.Dependences[I].first) {
5025       case OMPC_DEPEND_in:
5026         DepKind = DepIn;
5027         break;
5028       // Out and InOut dependencies must use the same code.
5029       case OMPC_DEPEND_out:
5030       case OMPC_DEPEND_inout:
5031         DepKind = DepInOut;
5032         break;
5033       case OMPC_DEPEND_source:
5034       case OMPC_DEPEND_sink:
5035       case OMPC_DEPEND_unknown:
5036         llvm_unreachable("Unknown task dependence type");
5037       }
5038       LValue FlagsLVal = CGF.EmitLValueForField(
5039           Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
5040       CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
5041                             FlagsLVal);
5042     }
5043     DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5044         CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
5045         CGF.VoidPtrTy);
5046   }
5047 
5048   // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5049   // libcall.
5050   // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
5051   // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
5052   // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if the
5053   // dependence list is not empty.
5054   llvm::Value *ThreadID = getThreadID(CGF, Loc);
5055   llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5056   llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
5057   llvm::Value *DepTaskArgs[7];
5058   if (NumDependencies) {
5059     DepTaskArgs[0] = UpLoc;
5060     DepTaskArgs[1] = ThreadID;
5061     DepTaskArgs[2] = NewTask;
5062     DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
5063     DepTaskArgs[4] = DependenciesArray.getPointer();
5064     DepTaskArgs[5] = CGF.Builder.getInt32(0);
5065     DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5066   }
5067   auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
5068                         &TaskArgs,
5069                         &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
5070     if (!Data.Tied) {
5071       auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
5072       LValue PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
5073       CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
5074     }
5075     if (NumDependencies) {
5076       CGF.EmitRuntimeCall(
5077           createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
5078     } else {
5079       CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
5080                           TaskArgs);
5081     }
5082     // If the parent region is untied, build the return for the untied task.
5083     if (auto *Region =
5084             dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
5085       Region->emitUntiedSwitch(CGF);
5086   };
5087 
5088   llvm::Value *DepWaitTaskArgs[6];
5089   if (NumDependencies) {
5090     DepWaitTaskArgs[0] = UpLoc;
5091     DepWaitTaskArgs[1] = ThreadID;
5092     DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
5093     DepWaitTaskArgs[3] = DependenciesArray.getPointer();
5094     DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
5095     DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5096   }
5097   auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
5098                         NumDependencies, &DepWaitTaskArgs,
5099                         Loc](CodeGenFunction &CGF, PrePostActionTy &) {
5100     CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5101     CodeGenFunction::RunCleanupsScope LocalScope(CGF);
5102     // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
5103     // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
5104     // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
5105     // is specified.
5106     if (NumDependencies)
5107       CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
5108                           DepWaitTaskArgs);
5109     // Call proxy_task_entry(gtid, new_task);
5110     auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
5111                       Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
5112       Action.Enter(CGF);
5113       llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
5114       CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
5115                                                           OutlinedFnArgs);
5116     };
5117 
5118     // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
5119     // kmp_task_t *new_task);
5120     // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
5121     // kmp_task_t *new_task);
5122     RegionCodeGenTy RCG(CodeGen);
5123     CommonActionTy Action(
5124         RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
5125         RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
5126     RCG.setAction(Action);
5127     RCG(CGF);
5128   };
5129 
5130   if (IfCond) {
5131     emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
5132   } else {
5133     RegionCodeGenTy ThenRCG(ThenCodeGen);
5134     ThenRCG(CGF);
5135   }
5136 }
5137 
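/// Emits the runtime call for a 'taskloop' directive. The loop bounds and the
/// stride are stored into the freshly allocated kmp_task_t and the work is
/// handed to the runtime; roughly (argument order follows the comment in the
/// body below):
/// \code
/// __kmpc_taskloop(loc, gtid, new_task, if_val, &task->lb, &task->ub, st,
///                 /*nogroup=*/0, sched, grainsize_or_num_tasks, task_dup);
/// \endcode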
5138 void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
5139                                        const OMPLoopDirective &D,
5140                                        llvm::Value *TaskFunction,
5141                                        QualType SharedsTy, Address Shareds,
5142                                        const Expr *IfCond,
5143                                        const OMPTaskDataTy &Data) {
5144   if (!CGF.HaveInsertPoint())
5145     return;
5146   TaskResultTy Result =
5147       emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
5148   // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
5149   // libcall.
5150   // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
5151   // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
5152   // sched, kmp_uint64 grainsize, void *task_dup);
5153   llvm::Value *ThreadID = getThreadID(CGF, Loc);
5154   llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
5155   llvm::Value *IfVal;
5156   if (IfCond) {
5157     IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
5158                                       /*isSigned=*/true);
5159   } else {
5160     IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);
5161   }
5162 
5163   LValue LBLVal = CGF.EmitLValueForField(
5164       Result.TDBase,
5165       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
5166   const auto *LBVar =
5167       cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
5168   CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
5169                        /*IsInitializer=*/true);
5170   LValue UBLVal = CGF.EmitLValueForField(
5171       Result.TDBase,
5172       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
5173   const auto *UBVar =
5174       cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
5175   CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
5176                        /*IsInitializer=*/true);
5177   LValue StLVal = CGF.EmitLValueForField(
5178       Result.TDBase,
5179       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
5180   const auto *StVar =
5181       cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
5182   CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
5183                        /*IsInitializer=*/true);
5184   // Store reductions address.
5185   LValue RedLVal = CGF.EmitLValueForField(
5186       Result.TDBase,
5187       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
5188   if (Data.Reductions) {
5189     CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
5190   } else {
5191     CGF.EmitNullInitialization(RedLVal.getAddress(),
5192                                CGF.getContext().VoidPtrTy);
5193   }
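  // Schedule kind passed to __kmpc_taskloop:
  // 0 - no schedule clause, 1 - 'grainsize' clause, 2 - 'num_tasks' clause.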
5194   enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
5195   llvm::Value *TaskArgs[] = {
5196       UpLoc,
5197       ThreadID,
5198       Result.NewTask,
5199       IfVal,
5200       LBLVal.getPointer(),
5201       UBLVal.getPointer(),
5202       CGF.EmitLoadOfScalar(StLVal, Loc),
5203       llvm::ConstantInt::getNullValue(
5204           CGF.IntTy), // nogroup: 0, the taskgroup is emitted by the compiler
5205       llvm::ConstantInt::getSigned(
5206           CGF.IntTy, Data.Schedule.getPointer()
5207                          ? Data.Schedule.getInt() ? NumTasks : Grainsize
5208                          : NoSchedule),
5209       Data.Schedule.getPointer()
5210           ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
5211                                       /*isSigned=*/false)
5212           : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
5213       Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5214                              Result.TaskDupFn, CGF.VoidPtrTy)
5215                        : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
5216   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
5217 }
5218 
5219 /// Emit a reduction operation for each element of an array (required for
5220 /// array sections): LHS op= RHS.
5221 /// \param Type Type of array.
5222 /// \param LHSVar Variable on the left side of the reduction operation
5223 /// (references element of array in original variable).
5224 /// \param RHSVar Variable on the right side of the reduction operation
5225 /// (references element of array in original variable).
5226 /// \param RedOpGen Generator of reduction operation with use of LHSVar and
5227 /// RHSVar.
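/// Conceptually, the emitted IR is a guarded element-by-element loop:
/// \code
/// if (NumElements != 0)
///   for (i = 0; i != NumElements; ++i)
///     <RedOpGen>(LHS[i], RHS[i]); // e.g. LHS[i] = RedOp(LHS[i], RHS[i]);
/// \endcode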
5228 static void EmitOMPAggregateReduction(
5229     CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
5230     const VarDecl *RHSVar,
5231     const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
5232                                   const Expr *, const Expr *)> &RedOpGen,
5233     const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
5234     const Expr *UpExpr = nullptr) {
5235   // Perform the reduction element by element.
5236   QualType ElementTy;
5237   Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
5238   Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);
5239 
5240   // Drill down to the base element type on both arrays.
5241   const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
5242   llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);
5243 
5244   llvm::Value *RHSBegin = RHSAddr.getPointer();
5245   llvm::Value *LHSBegin = LHSAddr.getPointer();
5246   // Cast from pointer to array type to pointer to single element.
5247   llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
5248   // The basic structure here is a while-do loop.
5249   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
5250   llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
5251   llvm::Value *IsEmpty =
5252       CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
5253   CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
5254 
5255   // Enter the loop body, making that address the current address.
5256   llvm::BasicBlock *EntryBB = CGF.Builder.GetInsertBlock();
5257   CGF.EmitBlock(BodyBB);
5258 
5259   CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);
5260 
5261   llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
5262       RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
5263   RHSElementPHI->addIncoming(RHSBegin, EntryBB);
5264   Address RHSElementCurrent =
5265       Address(RHSElementPHI,
5266               RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5267 
5268   llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
5269       LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
5270   LHSElementPHI->addIncoming(LHSBegin, EntryBB);
5271   Address LHSElementCurrent =
5272       Address(LHSElementPHI,
5273               LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
5274 
5275   // Emit copy.
5276   CodeGenFunction::OMPPrivateScope Scope(CGF);
5277   Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
5278   Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
5279   Scope.Privatize();
5280   RedOpGen(CGF, XExpr, EExpr, UpExpr);
5281   Scope.ForceCleanup();
5282 
5283   // Shift the address forward by one element.
5284   llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
5285       LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
5286   llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
5287       RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
5288   // Check whether we've reached the end.
5289   llvm::Value *Done =
5290       CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
5291   CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
5292   LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
5293   RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());
5294 
5295   // Done.
5296   CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
5297 }
5298 
5299 /// Emit the reduction combiner. If the combiner is a simple expression, emit
5300 /// it as is; otherwise treat it as the combiner of a user-defined reduction
5301 /// (UDR) and emit it as a call to the UDR combiner function.
5302 static void emitReductionCombiner(CodeGenFunction &CGF,
5303                                   const Expr *ReductionOp) {
5304   if (const auto *CE = dyn_cast<CallExpr>(ReductionOp))
5305     if (const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
5306       if (const auto *DRE =
5307               dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
5308         if (const auto *DRD =
5309                 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
5310           std::pair<llvm::Function *, llvm::Function *> Reduction =
5311               CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD);
5312           RValue Func = RValue::get(Reduction.first);
5313           CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
5314           CGF.EmitIgnoredExpr(ReductionOp);
5315           return;
5316         }
5317   CGF.EmitIgnoredExpr(ReductionOp);
5318 }
5319 
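/// Emits the reduction function that is later passed to __kmpc_reduce{_nowait}.
/// It receives two arrays of pointers to the reduction items and combines each
/// pair in place; sketched (see also the comment in emitReduction below):
/// \code
/// void .omp.reduction.reduction_func(void *lhs[<n>], void *rhs[<n>]) {
///   *(Type0*)lhs[0] = RedOp0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
///   ...
/// }
/// \endcode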
5320 llvm::Value *CGOpenMPRuntime::emitReductionFunction(
5321     CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType,
5322     ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
5323     ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
5324   ASTContext &C = CGM.getContext();
5325 
5326   // void reduction_func(void *LHSArg, void *RHSArg);
5327   FunctionArgList Args;
5328   ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5329                            ImplicitParamDecl::Other);
5330   ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5331                            ImplicitParamDecl::Other);
5332   Args.push_back(&LHSArg);
5333   Args.push_back(&RHSArg);
5334   const auto &CGFI =
5335       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5336   std::string Name = getName({"omp", "reduction", "reduction_func"});
5337   auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
5338                                     llvm::GlobalValue::InternalLinkage, Name,
5339                                     &CGM.getModule());
5340   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
5341   Fn->setDoesNotRecurse();
5342   CodeGenFunction CGF(CGM);
5343   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
5344 
5345   // Dst = (void*[n])(LHSArg);
5346   // Src = (void*[n])(RHSArg);
5347   Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5348       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
5349       ArgsType), CGF.getPointerAlign());
5350   Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5351       CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
5352       ArgsType), CGF.getPointerAlign());
5353 
5354   //  ...
5355   //  *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
5356   //  ...
5357   CodeGenFunction::OMPPrivateScope Scope(CGF);
5358   auto IPriv = Privates.begin();
5359   unsigned Idx = 0;
5360   for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
5361     const auto *RHSVar =
5362         cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
5363     Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
5364       return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
5365     });
5366     const auto *LHSVar =
5367         cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
5368     Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
5369       return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
5370     });
5371     QualType PrivTy = (*IPriv)->getType();
5372     if (PrivTy->isVariablyModifiedType()) {
5373       // Get array size and emit VLA type.
5374       ++Idx;
5375       Address Elem =
5376           CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
5377       llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
5378       const VariableArrayType *VLA =
5379           CGF.getContext().getAsVariableArrayType(PrivTy);
5380       const auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
5381       CodeGenFunction::OpaqueValueMapping OpaqueMap(
5382           CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
5383       CGF.EmitVariablyModifiedType(PrivTy);
5384     }
5385   }
5386   Scope.Privatize();
5387   IPriv = Privates.begin();
5388   auto ILHS = LHSExprs.begin();
5389   auto IRHS = RHSExprs.begin();
5390   for (const Expr *E : ReductionOps) {
5391     if ((*IPriv)->getType()->isArrayType()) {
5392       // Emit reduction for array section.
5393       const auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5394       const auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5395       EmitOMPAggregateReduction(
5396           CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5397           [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5398             emitReductionCombiner(CGF, E);
5399           });
5400     } else {
5401       // Emit reduction for array subscript or single variable.
5402       emitReductionCombiner(CGF, E);
5403     }
5404     ++IPriv;
5405     ++ILHS;
5406     ++IRHS;
5407   }
5408   Scope.ForceCleanup();
5409   CGF.FinishFunction();
5410   return Fn;
5411 }
5412 
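/// Emits the combiner LHS = RedOp(LHS, RHS) for a single reduction item. For
/// array-typed privates (array sections) the combiner is applied element by
/// element via EmitOMPAggregateReduction; otherwise it is emitted directly.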
5413 void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
5414                                                   const Expr *ReductionOp,
5415                                                   const Expr *PrivateRef,
5416                                                   const DeclRefExpr *LHS,
5417                                                   const DeclRefExpr *RHS) {
5418   if (PrivateRef->getType()->isArrayType()) {
5419     // Emit reduction for array section.
5420     const auto *LHSVar = cast<VarDecl>(LHS->getDecl());
5421     const auto *RHSVar = cast<VarDecl>(RHS->getDecl());
5422     EmitOMPAggregateReduction(
5423         CGF, PrivateRef->getType(), LHSVar, RHSVar,
5424         [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
5425           emitReductionCombiner(CGF, ReductionOp);
5426         });
5427   } else {
5428     // Emit reduction for array subscript or single variable.
5429     emitReductionCombiner(CGF, ReductionOp);
5430   }
5431 }
5432 
5433 void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
5434                                     ArrayRef<const Expr *> Privates,
5435                                     ArrayRef<const Expr *> LHSExprs,
5436                                     ArrayRef<const Expr *> RHSExprs,
5437                                     ArrayRef<const Expr *> ReductionOps,
5438                                     ReductionOptionsTy Options) {
5439   if (!CGF.HaveInsertPoint())
5440     return;
5441 
5442   bool WithNowait = Options.WithNowait;
5443   bool SimpleReduction = Options.SimpleReduction;
5444 
5445   // The following code should be emitted for the reduction:
5446   //
5447   // static kmp_critical_name lock = { 0 };
5448   //
5449   // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
5450   //  *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
5451   //  ...
5452   //  *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
5453   //  *(Type<n>-1*)rhs[<n>-1]);
5454   // }
5455   //
5456   // ...
5457   // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
5458   // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5459   // RedList, reduce_func, &<lock>)) {
5460   // case 1:
5461   //  ...
5462   //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5463   //  ...
5464   // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5465   // break;
5466   // case 2:
5467   //  ...
5468   //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5469   //  ...
5470   // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
5471   // break;
5472   // default:;
5473   // }
5474   //
5475   // If SimpleReduction is true, only the following code is generated:
5476   //  ...
5477   //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5478   //  ...
5479 
5480   ASTContext &C = CGM.getContext();
5481 
5482   if (SimpleReduction) {
5483     CodeGenFunction::RunCleanupsScope Scope(CGF);
5484     auto IPriv = Privates.begin();
5485     auto ILHS = LHSExprs.begin();
5486     auto IRHS = RHSExprs.begin();
5487     for (const Expr *E : ReductionOps) {
5488       emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5489                                   cast<DeclRefExpr>(*IRHS));
5490       ++IPriv;
5491       ++ILHS;
5492       ++IRHS;
5493     }
5494     return;
5495   }
5496 
5497   // 1. Build a list of reduction variables.
5498   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
5499   auto Size = RHSExprs.size();
5500   for (const Expr *E : Privates) {
5501     if (E->getType()->isVariablyModifiedType())
5502       // Reserve space for the array size.
5503       ++Size;
5504   }
5505   llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
5506   QualType ReductionArrayTy =
5507       C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
5508                              /*IndexTypeQuals=*/0);
5509   Address ReductionList =
5510       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
5511   auto IPriv = Privates.begin();
5512   unsigned Idx = 0;
5513   for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5514     Address Elem =
5515       CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
5516     CGF.Builder.CreateStore(
5517         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5518             CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
5519         Elem);
5520     if ((*IPriv)->getType()->isVariablyModifiedType()) {
5521       // Store array size.
5522       ++Idx;
5523       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
5524                                              CGF.getPointerSize());
5525       llvm::Value *Size = CGF.Builder.CreateIntCast(
5526           CGF.getVLASize(
5527                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
5528               .NumElts,
5529           CGF.SizeTy, /*isSigned=*/false);
5530       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
5531                               Elem);
5532     }
5533   }
5534 
5535   // 2. Emit reduce_func().
5536   llvm::Value *ReductionFn = emitReductionFunction(
5537       CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
5538       Privates, LHSExprs, RHSExprs, ReductionOps);
5539 
5540   // 3. Create static kmp_critical_name lock = { 0 };
5541   std::string Name = getName({"reduction"});
5542   llvm::Value *Lock = getCriticalRegionLock(Name);
5543 
5544   // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
5545   // RedList, reduce_func, &<lock>);
5546   llvm::Value *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
5547   llvm::Value *ThreadId = getThreadID(CGF, Loc);
5548   llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
5549   llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
5550       ReductionList.getPointer(), CGF.VoidPtrTy);
5551   llvm::Value *Args[] = {
5552       IdentTLoc,                             // ident_t *<loc>
5553       ThreadId,                              // i32 <gtid>
5554       CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
5555       ReductionArrayTySize,                  // size_type sizeof(RedList)
5556       RL,                                    // void *RedList
5557       ReductionFn, // void (*) (void *, void *) <reduce_func>
5558       Lock         // kmp_critical_name *&<lock>
5559   };
5560   llvm::Value *Res = CGF.EmitRuntimeCall(
5561       createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
5562                                        : OMPRTL__kmpc_reduce),
5563       Args);
5564 
5565   // 5. Build switch(res)
5566   llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
5567   llvm::SwitchInst *SwInst =
5568       CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
5569 
5570   // 6. Build case 1:
5571   //  ...
5572   //  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
5573   //  ...
5574   // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5575   // break;
5576   llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
5577   SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
5578   CGF.EmitBlock(Case1BB);
5579 
5580   // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
5581   llvm::Value *EndArgs[] = {
5582       IdentTLoc, // ident_t *<loc>
5583       ThreadId,  // i32 <gtid>
5584       Lock       // kmp_critical_name *&<lock>
5585   };
5586   auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
5587                        CodeGenFunction &CGF, PrePostActionTy &Action) {
5588     CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5589     auto IPriv = Privates.begin();
5590     auto ILHS = LHSExprs.begin();
5591     auto IRHS = RHSExprs.begin();
5592     for (const Expr *E : ReductionOps) {
5593       RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
5594                                      cast<DeclRefExpr>(*IRHS));
5595       ++IPriv;
5596       ++ILHS;
5597       ++IRHS;
5598     }
5599   };
5600   RegionCodeGenTy RCG(CodeGen);
5601   CommonActionTy Action(
5602       nullptr, llvm::None,
5603       createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
5604                                        : OMPRTL__kmpc_end_reduce),
5605       EndArgs);
5606   RCG.setAction(Action);
5607   RCG(CGF);
5608 
5609   CGF.EmitBranch(DefaultBB);
5610 
5611   // 7. Build case 2:
5612   //  ...
5613   //  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
5614   //  ...
5615   // break;
5616   llvm::BasicBlock *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
5617   SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
5618   CGF.EmitBlock(Case2BB);
5619 
5620   auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
5621                              CodeGenFunction &CGF, PrePostActionTy &Action) {
5622     auto ILHS = LHSExprs.begin();
5623     auto IRHS = RHSExprs.begin();
5624     auto IPriv = Privates.begin();
5625     for (const Expr *E : ReductionOps) {
5626       const Expr *XExpr = nullptr;
5627       const Expr *EExpr = nullptr;
5628       const Expr *UpExpr = nullptr;
5629       BinaryOperatorKind BO = BO_Comma;
5630       if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
5631         if (BO->getOpcode() == BO_Assign) {
5632           XExpr = BO->getLHS();
5633           UpExpr = BO->getRHS();
5634         }
5635       }
5636       // Try to emit update expression as a simple atomic.
5637       const Expr *RHSExpr = UpExpr;
5638       if (RHSExpr) {
5639         // Analyze RHS part of the whole expression.
5640         if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(
5641                 RHSExpr->IgnoreParenImpCasts())) {
5642           // If this is a conditional operator, analyze its condition for
5643           // min/max reduction operator.
5644           RHSExpr = ACO->getCond();
5645         }
5646         if (const auto *BORHS =
5647                 dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
5648           EExpr = BORHS->getRHS();
5649           BO = BORHS->getOpcode();
5650         }
5651       }
5652       if (XExpr) {
5653         const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5654         auto &&AtomicRedGen = [BO, VD,
5655                                Loc](CodeGenFunction &CGF, const Expr *XExpr,
5656                                     const Expr *EExpr, const Expr *UpExpr) {
5657           LValue X = CGF.EmitLValue(XExpr);
5658           RValue E;
5659           if (EExpr)
5660             E = CGF.EmitAnyExpr(EExpr);
5661           CGF.EmitOMPAtomicSimpleUpdateExpr(
5662               X, E, BO, /*IsXLHSInRHSPart=*/true,
5663               llvm::AtomicOrdering::Monotonic, Loc,
5664               [&CGF, UpExpr, VD, Loc](RValue XRValue) {
5665                 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5666                 PrivateScope.addPrivate(
5667                     VD, [&CGF, VD, XRValue, Loc]() {
5668                       Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5669                       CGF.emitOMPSimpleStore(
5670                           CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
5671                           VD->getType().getNonReferenceType(), Loc);
5672                       return LHSTemp;
5673                     });
5674                 (void)PrivateScope.Privatize();
5675                 return CGF.EmitAnyExpr(UpExpr);
5676               });
5677         };
5678         if ((*IPriv)->getType()->isArrayType()) {
5679           // Emit atomic reduction for array section.
5680           const auto *RHSVar =
5681               cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5682           EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
5683                                     AtomicRedGen, XExpr, EExpr, UpExpr);
5684         } else {
5685           // Emit atomic reduction for array subscript or single variable.
5686           AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5687         }
5688       } else {
5689         // Emit as a critical region.
5690         auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
5691                                            const Expr *, const Expr *) {
5692           CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
5693           std::string Name = RT.getName({"atomic_reduction"});
5694           RT.emitCriticalRegion(
5695               CGF, Name,
5696               [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
5697                 Action.Enter(CGF);
5698                 emitReductionCombiner(CGF, E);
5699               },
5700               Loc);
5701         };
5702         if ((*IPriv)->getType()->isArrayType()) {
5703           const auto *LHSVar =
5704               cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
5705           const auto *RHSVar =
5706               cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
5707           EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
5708                                     CritRedGen);
5709         } else {
5710           CritRedGen(CGF, nullptr, nullptr, nullptr);
5711         }
5712       }
5713       ++ILHS;
5714       ++IRHS;
5715       ++IPriv;
5716     }
5717   };
5718   RegionCodeGenTy AtomicRCG(AtomicCodeGen);
5719   if (!WithNowait) {
5720     // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
5721     llvm::Value *EndArgs[] = {
5722         IdentTLoc, // ident_t *<loc>
5723         ThreadId,  // i32 <gtid>
5724         Lock       // kmp_critical_name *&<lock>
5725     };
5726     CommonActionTy Action(nullptr, llvm::None,
5727                           createRuntimeFunction(OMPRTL__kmpc_end_reduce),
5728                           EndArgs);
5729     AtomicRCG.setAction(Action);
5730     AtomicRCG(CGF);
5731   } else {
5732     AtomicRCG(CGF);
5733   }
5734 
5735   CGF.EmitBranch(DefaultBB);
5736   CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
5737 }
5738 
5739 /// Generates a unique name for artificial threadprivate variables.
5740 /// Format is: <Prefix> "." <Decl_mangled_name> "_" <Decl_start_loc_raw_enc>
5741 static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
5742                                       const Expr *Ref) {
5743   SmallString<256> Buffer;
5744   llvm::raw_svector_ostream Out(Buffer);
5745   const clang::DeclRefExpr *DE;
5746   const VarDecl *D = ::getBaseDecl(Ref, DE);
5747   if (!D)
5748     D = cast<VarDecl>(cast<DeclRefExpr>(Ref)->getDecl());
5749   D = D->getCanonicalDecl();
5750   std::string Name = CGM.getOpenMPRuntime().getName(
5751       {D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
5752   Out << Prefix << Name << "_"
5753       << D->getCanonicalDecl()->getLocStart().getRawEncoding();
5754   return Out.str();
5755 }
5756 
5757 /// Emits reduction initializer function:
5758 /// \code
5759 /// void @.red_init(void* %arg) {
5760 /// %0 = bitcast void* %arg to <type>*
5761 /// store <type> <init>, <type>* %0
5762 /// ret void
5763 /// }
5764 /// \endcode
5765 static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
5766                                            SourceLocation Loc,
5767                                            ReductionCodeGen &RCG, unsigned N) {
5768   ASTContext &C = CGM.getContext();
5769   FunctionArgList Args;
5770   ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5771                           ImplicitParamDecl::Other);
5772   Args.emplace_back(&Param);
5773   const auto &FnInfo =
5774       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5775   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5776   std::string Name = CGM.getOpenMPRuntime().getName({"red_init", ""});
5777   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5778                                     Name, &CGM.getModule());
5779   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5780   Fn->setDoesNotRecurse();
5781   CodeGenFunction CGF(CGM);
5782   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5783   Address PrivateAddr = CGF.EmitLoadOfPointer(
5784       CGF.GetAddrOfLocalVar(&Param),
5785       C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5786   llvm::Value *Size = nullptr;
5787   // If the size of the reduction item is non-constant, load it from the
5788   // global threadprivate variable.
5789   if (RCG.getSizes(N).second) {
5790     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5791         CGF, CGM.getContext().getSizeType(),
5792         generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5793     Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5794                                 CGM.getContext().getSizeType(), Loc);
5795   }
5796   RCG.emitAggregateType(CGF, N, Size);
5797   LValue SharedLVal;
5798   // If the initializer uses the initializer from the 'declare reduction'
5799   // construct, emit a pointer to the address of the original reduction item
5800   // (required by the reduction initializer).
5801   if (RCG.usesReductionInitializer(N)) {
5802     Address SharedAddr =
5803         CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5804             CGF, CGM.getContext().VoidPtrTy,
5805             generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
5806     SharedAddr = CGF.EmitLoadOfPointer(
5807         SharedAddr,
5808         CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
5809     SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
5810   } else {
5811     SharedLVal = CGF.MakeNaturalAlignAddrLValue(
5812         llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
5813         CGM.getContext().VoidPtrTy);
5814   }
5815   // Emit the initializer:
5816   // %0 = bitcast void* %arg to <type>*
5817   // store <type> <init>, <type>* %0
5818   RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
5819                          [](CodeGenFunction &) { return false; });
5820   CGF.FinishFunction();
5821   return Fn;
5822 }
5823 
5824 /// Emits reduction combiner function:
5825 /// \code
5826 /// void @.red_comb(void* %arg0, void* %arg1) {
5827 /// %lhs = bitcast void* %arg0 to <type>*
5828 /// %rhs = bitcast void* %arg1 to <type>*
5829 /// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
5830 /// store <type> %2, <type>* %lhs
5831 /// ret void
5832 /// }
5833 /// \endcode
5834 static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
5835                                            SourceLocation Loc,
5836                                            ReductionCodeGen &RCG, unsigned N,
5837                                            const Expr *ReductionOp,
5838                                            const Expr *LHS, const Expr *RHS,
5839                                            const Expr *PrivateRef) {
5840   ASTContext &C = CGM.getContext();
5841   const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
5842   const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
5843   FunctionArgList Args;
5844   ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
5845                                C.VoidPtrTy, ImplicitParamDecl::Other);
5846   ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5847                             ImplicitParamDecl::Other);
5848   Args.emplace_back(&ParamInOut);
5849   Args.emplace_back(&ParamIn);
5850   const auto &FnInfo =
5851       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5852   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5853   std::string Name = CGM.getOpenMPRuntime().getName({"red_comb", ""});
5854   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5855                                     Name, &CGM.getModule());
5856   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5857   Fn->setDoesNotRecurse();
5858   CodeGenFunction CGF(CGM);
5859   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5860   llvm::Value *Size = nullptr;
5861   // If the size of the reduction item is non-constant, load it from the
5862   // global threadprivate variable.
5863   if (RCG.getSizes(N).second) {
5864     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5865         CGF, CGM.getContext().getSizeType(),
5866         generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5867     Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5868                                 CGM.getContext().getSizeType(), Loc);
5869   }
5870   RCG.emitAggregateType(CGF, N, Size);
5871   // Remap lhs and rhs variables to the addresses of the function arguments.
5872   // %lhs = bitcast void* %arg0 to <type>*
5873   // %rhs = bitcast void* %arg1 to <type>*
5874   CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
5875   PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
5876     // Pull out the pointer to the variable.
5877     Address PtrAddr = CGF.EmitLoadOfPointer(
5878         CGF.GetAddrOfLocalVar(&ParamInOut),
5879         C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5880     return CGF.Builder.CreateElementBitCast(
5881         PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
5882   });
5883   PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
5884     // Pull out the pointer to the variable.
5885     Address PtrAddr = CGF.EmitLoadOfPointer(
5886         CGF.GetAddrOfLocalVar(&ParamIn),
5887         C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5888     return CGF.Builder.CreateElementBitCast(
5889         PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
5890   });
5891   PrivateScope.Privatize();
5892   // Emit the combiner body:
5893   // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
5894   // store <type> %2, <type>* %lhs
5895   CGM.getOpenMPRuntime().emitSingleReductionCombiner(
5896       CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
5897       cast<DeclRefExpr>(RHS));
5898   CGF.FinishFunction();
5899   return Fn;
5900 }
5901 
5902 /// Emits reduction finalizer function:
5903 /// \code
5904 /// void @.red_fini(void* %arg) {
5905 /// %0 = bitcast void* %arg to <type>*
5906 /// <destroy>(<type>* %0)
5907 /// ret void
5908 /// }
5909 /// \endcode
5910 static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
5911                                            SourceLocation Loc,
5912                                            ReductionCodeGen &RCG, unsigned N) {
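  // If the reduction items need no cleanups, no finalizer is emitted; the
  // caller stores a null pointer in the reduce_fini field instead.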
5913   if (!RCG.needCleanups(N))
5914     return nullptr;
5915   ASTContext &C = CGM.getContext();
5916   FunctionArgList Args;
5917   ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
5918                           ImplicitParamDecl::Other);
5919   Args.emplace_back(&Param);
5920   const auto &FnInfo =
5921       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
5922   llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
5923   std::string Name = CGM.getOpenMPRuntime().getName({"red_fini", ""});
5924   auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5925                                     Name, &CGM.getModule());
5926   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FnInfo);
5927   Fn->setDoesNotRecurse();
5928   CodeGenFunction CGF(CGM);
5929   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
5930   Address PrivateAddr = CGF.EmitLoadOfPointer(
5931       CGF.GetAddrOfLocalVar(&Param),
5932       C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
5933   llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from the global
  // threadprivate variable.
5936   if (RCG.getSizes(N).second) {
5937     Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
5938         CGF, CGM.getContext().getSizeType(),
5939         generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
5940     Size = CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
5941                                 CGM.getContext().getSizeType(), Loc);
5942   }
5943   RCG.emitAggregateType(CGF, N, Size);
5944   // Emit the finalizer body:
5945   // <destroy>(<type>* %0)
5946   RCG.emitCleanups(CGF, N, PrivateAddr);
5947   CGF.FinishFunction();
5948   return Fn;
5949 }
5950 
5951 llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
5952     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
5953     ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
5954   if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
5955     return nullptr;
5956 
5957   // Build typedef struct:
5958   // kmp_task_red_input {
5959   //   void *reduce_shar; // shared reduction item
5960   //   size_t reduce_size; // size of data item
5961   //   void *reduce_init; // data initialization routine
5962   //   void *reduce_fini; // data finalization routine
5963   //   void *reduce_comb; // data combiner routine
5964   //   kmp_task_red_flags_t flags; // flags for additional info from compiler
5965   // } kmp_task_red_input_t;
5966   ASTContext &C = CGM.getContext();
5967   RecordDecl *RD = C.buildImplicitRecord("kmp_task_red_input_t");
5968   RD->startDefinition();
5969   const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5970   const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5972   const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5973   const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
5974   const FieldDecl *FlagsFD = addFieldToRecordDecl(
5975       C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
5976   RD->completeDefinition();
5977   QualType RDType = C.getRecordType(RD);
5978   unsigned Size = Data.ReductionVars.size();
5979   llvm::APInt ArraySize(/*numBits=*/64, Size);
5980   QualType ArrayRDType = C.getConstantArrayType(
5981       RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
5982   // kmp_task_red_input_t .rd_input.[Size];
5983   Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
5984   ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
5985                        Data.ReductionOps);
5986   for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
5987     // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
5988     llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
5989                            llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
5990     llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
5991         TaskRedInput.getPointer(), Idxs,
5992         /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
5993         ".rd_input.gep.");
5994     LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
5995     // ElemLVal.reduce_shar = &Shareds[Cnt];
5996     LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
5997     RCG.emitSharedLValue(CGF, Cnt);
5998     llvm::Value *CastedShared =
5999         CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
6000     CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
6001     RCG.emitAggregateType(CGF, Cnt);
6002     llvm::Value *SizeValInChars;
6003     llvm::Value *SizeVal;
6004     std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
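    // getSizes() returns the size of the item in chars and, for VLAs/array
    // sections, a non-constant size value (null for constant-sized items).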
    // We use delayed creation/initialization for VLAs, array sections and
    // custom reduction initializations. This is required because the runtime
    // does not provide a way to pass the sizes of VLAs/array sections to the
    // initializer/combiner/finalizer functions and does not pass the pointer
    // to the original reduction item to the initializer. Instead,
    // threadprivate global variables are used to store these values for use
    // in those functions.
6011     bool DelayedCreation = !!SizeVal;
6012     SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
6013                                                /*isSigned=*/false);
6014     LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
6015     CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
6016     // ElemLVal.reduce_init = init;
6017     LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
6018     llvm::Value *InitAddr =
6019         CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
6020     CGF.EmitStoreOfScalar(InitAddr, InitLVal);
6021     DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
6022     // ElemLVal.reduce_fini = fini;
6023     LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
6024     llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
6025     llvm::Value *FiniAddr = Fini
6026                                 ? CGF.EmitCastToVoidPtr(Fini)
6027                                 : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
6028     CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
6029     // ElemLVal.reduce_comb = comb;
6030     LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
6031     llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
6032         CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
6033         RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
6034     CGF.EmitStoreOfScalar(CombAddr, CombLVal);
6035     // ElemLVal.flags = 0;
6036     LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
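    // A non-zero flag asks the runtime to delay creation/initialization of
    // the private copies (see the comment on DelayedCreation above).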
6037     if (DelayedCreation) {
6038       CGF.EmitStoreOfScalar(
6039           llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
6040           FlagsLVal);
6041     } else
6042       CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
6043   }
6044   // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
6045   // *data);
6046   llvm::Value *Args[] = {
6047       CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
6048                                 /*isSigned=*/true),
6049       llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
6050       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
6051                                                       CGM.VoidPtrTy)};
6052   return CGF.EmitRuntimeCall(
6053       createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
6054 }
6055 
6056 void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
6057                                               SourceLocation Loc,
6058                                               ReductionCodeGen &RCG,
6059                                               unsigned N) {
6060   auto Sizes = RCG.getSizes(N);
  // Emit a threadprivate global variable if the size of the reduction item is
  // non-constant (Sizes.second != nullptr).
6063   if (Sizes.second) {
6064     llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
6065                                                      /*isSigned=*/false);
6066     Address SizeAddr = getAddrOfArtificialThreadPrivate(
6067         CGF, CGM.getContext().getSizeType(),
6068         generateUniqueName(CGM, "reduction_size", RCG.getRefExpr(N)));
6069     CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
6070   }
  // Store the address of the original reduction item if a custom initializer
  // is used.
6072   if (RCG.usesReductionInitializer(N)) {
6073     Address SharedAddr = getAddrOfArtificialThreadPrivate(
6074         CGF, CGM.getContext().VoidPtrTy,
6075         generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
6076     CGF.Builder.CreateStore(
6077         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
6078             RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
6079         SharedAddr, /*IsVolatile=*/false);
6080   }
6081 }
6082 
6083 Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
6084                                               SourceLocation Loc,
6085                                               llvm::Value *ReductionsPtr,
6086                                               LValue SharedLVal) {
6087   // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
6088   // *d);
6089   llvm::Value *Args[] = {
6090       CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
6091                                 /*isSigned=*/true),
6092       ReductionsPtr,
6093       CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
6094                                                       CGM.VoidPtrTy)};
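  // The runtime returns a pointer to the thread-specific copy of the
  // reduction item; reuse the alignment of the original shared item for it.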
6095   return Address(
6096       CGF.EmitRuntimeCall(
6097           createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
6098       SharedLVal.getAlignment());
6099 }
6100 
6101 void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
6102                                        SourceLocation Loc) {
6103   if (!CGF.HaveInsertPoint())
6104     return;
6105   // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
6106   // global_tid);
6107   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
6108   // Ignore return result until untied tasks are supported.
6109   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
6110   if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
6111     Region->emitUntiedSwitch(CGF);
6112 }
6113 
6114 void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
6115                                            OpenMPDirectiveKind InnerKind,
6116                                            const RegionCodeGenTy &CodeGen,
6117                                            bool HasCancel) {
6118   if (!CGF.HaveInsertPoint())
6119     return;
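  // Emit the body directly into the current function, wrapped in an inlined
  // OpenMP region so that nested runtime calls see the correct region info.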
6120   InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
6121   CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
6122 }
6123 
6124 namespace {
6125 enum RTCancelKind {
6126   CancelNoreq = 0,
6127   CancelParallel = 1,
6128   CancelLoop = 2,
6129   CancelSections = 3,
6130   CancelTaskgroup = 4
6131 };
6132 } // anonymous namespace
6133 
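/// Map the kind of the canceled construct to the matching runtime
/// cancellation kind constant (see the RTCancelKind enum above).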
6134 static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
6135   RTCancelKind CancelKind = CancelNoreq;
6136   if (CancelRegion == OMPD_parallel)
6137     CancelKind = CancelParallel;
6138   else if (CancelRegion == OMPD_for)
6139     CancelKind = CancelLoop;
6140   else if (CancelRegion == OMPD_sections)
6141     CancelKind = CancelSections;
6142   else {
6143     assert(CancelRegion == OMPD_taskgroup);
6144     CancelKind = CancelTaskgroup;
6145   }
6146   return CancelKind;
6147 }
6148 
6149 void CGOpenMPRuntime::emitCancellationPointCall(
6150     CodeGenFunction &CGF, SourceLocation Loc,
6151     OpenMPDirectiveKind CancelRegion) {
6152   if (!CGF.HaveInsertPoint())
6153     return;
6154   // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
6155   // global_tid, kmp_int32 cncl_kind);
6156   if (auto *OMPRegionInfo =
6157           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel; the cancel may instead be issued by another, adjacent task.
6160     if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
6161       llvm::Value *Args[] = {
6162           emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
6163           CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
6164       // Ignore return result until untied tasks are supported.
6165       llvm::Value *Result = CGF.EmitRuntimeCall(
6166           createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
6167       // if (__kmpc_cancellationpoint()) {
6168       //   exit from construct;
6169       // }
6170       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
6171       llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
6172       llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
6173       CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6174       CGF.EmitBlock(ExitBB);
6175       // exit from construct;
6176       CodeGenFunction::JumpDest CancelDest =
6177           CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6178       CGF.EmitBranchThroughCleanup(CancelDest);
6179       CGF.EmitBlock(ContBB, /*IsFinished=*/true);
6180     }
6181   }
6182 }
6183 
6184 void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
6185                                      const Expr *IfCond,
6186                                      OpenMPDirectiveKind CancelRegion) {
6187   if (!CGF.HaveInsertPoint())
6188     return;
6189   // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
6190   // kmp_int32 cncl_kind);
6191   if (auto *OMPRegionInfo =
6192           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
6193     auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
6194                                                         PrePostActionTy &) {
6195       CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
6196       llvm::Value *Args[] = {
6197           RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
6198           CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
6199       // Ignore return result until untied tasks are supported.
6200       llvm::Value *Result = CGF.EmitRuntimeCall(
6201           RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
6202       // if (__kmpc_cancel()) {
6203       //   exit from construct;
6204       // }
6205       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
6206       llvm::BasicBlock *ContBB = CGF.createBasicBlock(".cancel.continue");
6207       llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
6208       CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6209       CGF.EmitBlock(ExitBB);
6210       // exit from construct;
6211       CodeGenFunction::JumpDest CancelDest =
6212           CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6213       CGF.EmitBranchThroughCleanup(CancelDest);
6214       CGF.EmitBlock(ContBB, /*IsFinished=*/true);
6215     };
6216     if (IfCond) {
6217       emitOMPIfClause(CGF, IfCond, ThenGen,
6218                       [](CodeGenFunction &, PrePostActionTy &) {});
6219     } else {
6220       RegionCodeGenTy ThenRCG(ThenGen);
6221       ThenRCG(CGF);
6222     }
6223   }
6224 }
6225 
6226 void CGOpenMPRuntime::emitTargetOutlinedFunction(
6227     const OMPExecutableDirective &D, StringRef ParentName,
6228     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6229     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6230   assert(!ParentName.empty() && "Invalid target region parent name!");
6231   emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
6232                                    IsOffloadEntry, CodeGen);
6233 }
6234 
6235 void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
6236     const OMPExecutableDirective &D, StringRef ParentName,
6237     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6238     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
6239   // Create a unique name for the entry function using the source location
6240   // information of the current target region. The name will be something like:
6241   //
6242   // __omp_offloading_DD_FFFF_PP_lBB
6243   //
6244   // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
6245   // mangled name of the function that encloses the target region and BB is the
6246   // line number of the target region.
6247 
6248   unsigned DeviceID;
6249   unsigned FileID;
6250   unsigned Line;
6251   getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
6252                            Line);
6253   SmallString<64> EntryFnName;
6254   {
6255     llvm::raw_svector_ostream OS(EntryFnName);
6256     OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
6257        << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
6258   }
6259 
6260   const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6261 
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
6263   CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
6264   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6265 
6266   OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
6267 
  // If this target outlined function is not an offload entry, we don't need to
  // register it.
6270   if (!IsOffloadEntry)
6271     return;
6272 
  // The target region ID is used by the runtime library to identify the current
  // target region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that implements
  // the target region, but we aren't using that pointer so that the compiler
  // does not need to keep it alive and can, for instance, inline the host
  // function if that proves worthwhile during optimization. On the other hand,
  // if emitting code for the device, the ID has to be the function address so
  // that it can be retrieved from the offloading entry and launched by the
  // runtime library. We also give the outlined function external linkage when
  // emitting code for the device, because these functions will be entry
  // points into the device.
6283 
6284   if (CGM.getLangOpts().OpenMPIsDevice) {
6285     OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
6286     OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
6287     OutlinedFn->setDSOLocal(false);
6288   } else {
6289     std::string Name = getName({EntryFnName, "region_id"});
6290     OutlinedFnID = new llvm::GlobalVariable(
6291         CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
6292         llvm::GlobalValue::WeakAnyLinkage,
6293         llvm::Constant::getNullValue(CGM.Int8Ty), Name);
6294   }
6295 
6296   // Register the information for the entry associated with this target region.
6297   OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
6298       DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
6299       OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
6300 }
6301 
/// Discard all CompoundStmts intervening between two constructs.
6303 static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
6304   while (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
6305     Body = CS->body_front();
6306 
6307   return Body;
6308 }
6309 
6310 /// Emit the number of teams for a target directive.  Inspect the num_teams
6311 /// clause associated with a teams construct combined or closely nested
6312 /// with the target directive.
6313 ///
6314 /// Emit a team of size one for directives such as 'target parallel' that
6315 /// have no associated teams construct.
6316 ///
6317 /// Otherwise, return nullptr.
6318 static llvm::Value *
6319 emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
6320                                CodeGenFunction &CGF,
6321                                const OMPExecutableDirective &D) {
6322   assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
6323                                               "teams directive expected to be "
6324                                               "emitted only for the host!");
6325 
6326   CGBuilderTy &Bld = CGF.Builder;
6327 
6328   // If the target directive is combined with a teams directive:
6329   //   Return the value in the num_teams clause, if any.
6330   //   Otherwise, return 0 to denote the runtime default.
6331   if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
6332     if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
6333       CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
6334       llvm::Value *NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
6335                                                  /*IgnoreResultAssign*/ true);
6336       return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
6337                                /*IsSigned=*/true);
6338     }
6339 
6340     // The default value is 0.
6341     return Bld.getInt32(0);
6342   }
6343 
6344   // If the target directive is combined with a parallel directive but not a
6345   // teams directive, start one team.
6346   if (isOpenMPParallelDirective(D.getDirectiveKind()))
6347     return Bld.getInt32(1);
6348 
6349   // If the current target region has a teams region enclosed, we need to get
6350   // the number of teams to pass to the runtime function call. This is done
  // by generating the expression in an inlined region. This is required because
6352   // the expression is captured in the enclosing target environment when the
6353   // teams directive is not combined with target.
6354 
6355   const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6356 
6357   if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
6358           ignoreCompoundStmts(CS.getCapturedStmt()))) {
6359     if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
6360       if (const auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
6361         CGOpenMPInnerExprInfo CGInfo(CGF, CS);
6362         CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6363         llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
6364         return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
6365                                  /*IsSigned=*/true);
6366       }
6367 
6368       // If we have an enclosed teams directive but no num_teams clause we use
6369       // the default value 0.
6370       return Bld.getInt32(0);
6371     }
6372   }
6373 
6374   // No teams associated with the directive.
6375   return nullptr;
6376 }
6377 
6378 /// Emit the number of threads for a target directive.  Inspect the
6379 /// thread_limit clause associated with a teams construct combined or closely
6380 /// nested with the target directive.
6381 ///
6382 /// Emit the num_threads clause for directives such as 'target parallel' that
6383 /// have no associated teams construct.
6384 ///
6385 /// Otherwise, return nullptr.
6386 static llvm::Value *
6387 emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
6388                                  CodeGenFunction &CGF,
6389                                  const OMPExecutableDirective &D) {
6390   assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
6391                                               "teams directive expected to be "
6392                                               "emitted only for the host!");
6393 
6394   CGBuilderTy &Bld = CGF.Builder;
6395 
6396   //
6397   // If the target directive is combined with a teams directive:
6398   //   Return the value in the thread_limit clause, if any.
6399   //
6400   // If the target directive is combined with a parallel directive:
6401   //   Return the value in the num_threads clause, if any.
6402   //
6403   // If both clauses are set, select the minimum of the two.
6404   //
  // If neither the teams nor the parallel combined directive sets the number
  // of threads in a team, return 0 to denote the runtime default.
6407   //
6408   // If this is not a teams directive return nullptr.
6409 
6410   if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
6411       isOpenMPParallelDirective(D.getDirectiveKind())) {
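    // A value of 0 tells the runtime to use its default thread limit.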
6412     llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
6413     llvm::Value *NumThreadsVal = nullptr;
6414     llvm::Value *ThreadLimitVal = nullptr;
6415 
6416     if (const auto *ThreadLimitClause =
6417             D.getSingleClause<OMPThreadLimitClause>()) {
6418       CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
6419       llvm::Value *ThreadLimit =
6420           CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
6421                              /*IgnoreResultAssign*/ true);
6422       ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
6423                                          /*IsSigned=*/true);
6424     }
6425 
6426     if (const auto *NumThreadsClause =
6427             D.getSingleClause<OMPNumThreadsClause>()) {
6428       CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
6429       llvm::Value *NumThreads =
6430           CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
6431                              /*IgnoreResultAssign*/ true);
6432       NumThreadsVal =
6433           Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
6434     }
6435 
6436     // Select the lesser of thread_limit and num_threads.
6437     if (NumThreadsVal)
6438       ThreadLimitVal = ThreadLimitVal
6439                            ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
6440                                                                 ThreadLimitVal),
6441                                               NumThreadsVal, ThreadLimitVal)
6442                            : NumThreadsVal;
6443 
    // Set the default value passed to the runtime if either a teams or a
    // target parallel type directive is found but no clause is specified.
6446     if (!ThreadLimitVal)
6447       ThreadLimitVal = DefaultThreadLimitVal;
6448 
6449     return ThreadLimitVal;
6450   }
6451 
6452   // If the current target region has a teams region enclosed, we need to get
6453   // the thread limit to pass to the runtime function call. This is done
  // by generating the expression in an inlined region. This is required because
6455   // the expression is captured in the enclosing target environment when the
6456   // teams directive is not combined with target.
6457 
6458   const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6459 
6460   if (const auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
6461           ignoreCompoundStmts(CS.getCapturedStmt()))) {
6462     if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
6463       if (const auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
6464         CGOpenMPInnerExprInfo CGInfo(CGF, CS);
6465         CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
6466         llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
6467         return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
6468                                          /*IsSigned=*/true);
6469       }
6470 
6471       // If we have an enclosed teams directive but no thread_limit clause we
6472       // use the default value 0.
6473       return CGF.Builder.getInt32(0);
6474     }
6475   }
6476 
6477   // No teams associated with the directive.
6478   return nullptr;
6479 }
6480 
6481 namespace {
6482 // Utility to handle information from clauses associated with a given
6483 // construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
6484 // It provides a convenient interface to obtain the information and generate
6485 // code for that information.
6486 class MappableExprsHandler {
6487 public:
6488   /// Values for bit flags used to specify the mapping type for
6489   /// offloading.
6490   enum OpenMPOffloadMappingFlags {
6491     /// Allocate memory on the device and move data from host to device.
6492     OMP_MAP_TO = 0x01,
6493     /// Allocate memory on the device and move data from device to host.
6494     OMP_MAP_FROM = 0x02,
6495     /// Always perform the requested mapping action on the element, even
6496     /// if it was already mapped before.
6497     OMP_MAP_ALWAYS = 0x04,
6498     /// Delete the element from the device environment, ignoring the
6499     /// current reference count associated with the element.
6500     OMP_MAP_DELETE = 0x08,
6501     /// The element being mapped is a pointer-pointee pair; both the
6502     /// pointer and the pointee should be mapped.
6503     OMP_MAP_PTR_AND_OBJ = 0x10,
    /// This flag signals that the base address of an entry should be
6505     /// passed to the target kernel as an argument.
6506     OMP_MAP_TARGET_PARAM = 0x20,
6507     /// Signal that the runtime library has to return the device pointer
6508     /// in the current position for the data being mapped. Used when we have the
6509     /// use_device_ptr clause.
6510     OMP_MAP_RETURN_PARAM = 0x40,
6511     /// This flag signals that the reference being passed is a pointer to
6512     /// private data.
6513     OMP_MAP_PRIVATE = 0x80,
6514     /// Pass the element to the device by value.
6515     OMP_MAP_LITERAL = 0x100,
6516     /// Implicit map
6517     OMP_MAP_IMPLICIT = 0x200,
6518   };
6519 
6520   /// Class that associates information with a base pointer to be passed to the
6521   /// runtime library.
6522   class BasePointerInfo {
6523     /// The base pointer.
6524     llvm::Value *Ptr = nullptr;
6525     /// The base declaration that refers to this device pointer, or null if
6526     /// there is none.
6527     const ValueDecl *DevPtrDecl = nullptr;
6528 
6529   public:
6530     BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
6531         : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
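    /// Return the raw base pointer value.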
6532     llvm::Value *operator*() const { return Ptr; }
6533     const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
6534     void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
6535   };
6536 
6537   typedef SmallVector<BasePointerInfo, 16> MapBaseValuesArrayTy;
6538   typedef SmallVector<llvm::Value *, 16> MapValuesArrayTy;
6539   typedef SmallVector<uint64_t, 16> MapFlagsArrayTy;
6540 
6541 private:
6542   /// Directive from where the map clauses were extracted.
6543   const OMPExecutableDirective &CurDir;
6544 
6545   /// Function the directive is being generated for.
6546   CodeGenFunction &CGF;
6547 
6548   /// Set of all first private variables in the current directive.
6549   llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
6550   /// Set of all reduction variables in the current directive.
6551   llvm::SmallPtrSet<const VarDecl *, 8> ReductionDecls;
6552 
6553   /// Map between device pointer declarations and their expression components.
6554   /// The key value for declarations in 'this' is null.
6555   llvm::DenseMap<
6556       const ValueDecl *,
6557       SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
6558       DevPointersMap;
6559 
6560   llvm::Value *getExprTypeSize(const Expr *E) const {
6561     QualType ExprTy = E->getType().getCanonicalType();
6562 
6563     // Reference types are ignored for mapping purposes.
6564     if (const auto *RefTy = ExprTy->getAs<ReferenceType>())
6565       ExprTy = RefTy->getPointeeType().getCanonicalType();
6566 
6567     // Given that an array section is considered a built-in type, we need to
6568     // do the calculation based on the length of the section instead of relying
6569     // on CGF.getTypeSize(E->getType()).
6570     if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
6571       QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
6572                             OAE->getBase()->IgnoreParenImpCasts())
6573                             .getCanonicalType();
6574 
6575       // If there is no length associated with the expression, that means we
6576       // are using the whole length of the base.
6577       if (!OAE->getLength() && OAE->getColonLoc().isValid())
6578         return CGF.getTypeSize(BaseTy);
6579 
6580       llvm::Value *ElemSize;
6581       if (const auto *PTy = BaseTy->getAs<PointerType>()) {
6582         ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
6583       } else {
6584         const auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
6585         assert(ATy && "Expecting array type if not a pointer type.");
6586         ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
6587       }
6588 
6589       // If we don't have a length at this point, that is because we have an
6590       // array section with a single element.
6591       if (!OAE->getLength())
6592         return ElemSize;
6593 
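      // Otherwise, the size of the section is the number of elements times
      // the element size.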
6594       llvm::Value *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
6595       LengthVal =
6596           CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
6597       return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
6598     }
6599     return CGF.getTypeSize(ExprTy);
6600   }
6601 
6602   /// Return the corresponding bits for a given map clause modifier. Add
6603   /// a flag marking the map as a pointer if requested. Add a flag marking the
6604   /// map as the first one of a series of maps that relate to the same map
6605   /// expression.
6606   uint64_t getMapTypeBits(OpenMPMapClauseKind MapType,
6607                           OpenMPMapClauseKind MapTypeModifier, bool AddPtrFlag,
6608                           bool AddIsTargetParamFlag) const {
6609     uint64_t Bits = 0u;
6610     switch (MapType) {
6611     case OMPC_MAP_alloc:
6612     case OMPC_MAP_release:
      // alloc and release are the default behavior in the runtime library,
      // i.e. if we don't pass any bits, alloc/release is what the runtime is
      // going to do. Therefore, we don't need to signal anything for these
      // two type modifiers.
6617       break;
6618     case OMPC_MAP_to:
6619       Bits = OMP_MAP_TO;
6620       break;
6621     case OMPC_MAP_from:
6622       Bits = OMP_MAP_FROM;
6623       break;
6624     case OMPC_MAP_tofrom:
6625       Bits = OMP_MAP_TO | OMP_MAP_FROM;
6626       break;
6627     case OMPC_MAP_delete:
6628       Bits = OMP_MAP_DELETE;
6629       break;
6630     default:
6631       llvm_unreachable("Unexpected map type!");
6632       break;
6633     }
6634     if (AddPtrFlag)
6635       Bits |= OMP_MAP_PTR_AND_OBJ;
6636     if (AddIsTargetParamFlag)
6637       Bits |= OMP_MAP_TARGET_PARAM;
6638     if (MapTypeModifier == OMPC_MAP_always)
6639       Bits |= OMP_MAP_ALWAYS;
6640     return Bits;
6641   }
6642 
6643   /// Return true if the provided expression is a final array section. A
  /// final array section is one whose length can't be proved to be one.
6645   bool isFinalArraySectionExpression(const Expr *E) const {
6646     const auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
6647 
6648     // It is not an array section and therefore not a unity-size one.
6649     if (!OASE)
6650       return false;
6651 
    // An array section with no colon always refers to a single element.
6653     if (OASE->getColonLoc().isInvalid())
6654       return false;
6655 
6656     const Expr *Length = OASE->getLength();
6657 
6658     // If we don't have a length we have to check if the array has size 1
6659     // for this dimension. Also, we should always expect a length if the
    // base type is a pointer.
6661     if (!Length) {
6662       QualType BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
6663                              OASE->getBase()->IgnoreParenImpCasts())
6664                              .getCanonicalType();
6665       if (const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
6666         return ATy->getSize().getSExtValue() != 1;
6667       // If we don't have a constant dimension length, we have to consider
6668       // the current section as having any size, so it is not necessarily
      // unitary. If it happens to be unity size, that's the user's fault.
6670       return true;
6671     }
6672 
6673     // Check if the length evaluates to 1.
6674     llvm::APSInt ConstLength;
6675     if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
      return true; // Can have a size greater than 1.
6677 
6678     return ConstLength.getSExtValue() != 1;
6679   }
6680 
  /// Return the adjusted map modifiers if the declaration a capture refers to
  /// appears in a firstprivate or reduction clause. This is expected to be
  /// used only with directives that start with 'target'.
6684   unsigned adjustMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap,
6685                                                unsigned CurrentModifiers) {
6686     assert(Cap.capturesVariable() && "Expected capture by reference only!");
6687 
6688     // A first private variable captured by reference will use only the
    // 'private ptr' and 'map to' flags. Return the right flags if the captured
6690     // declaration is known as first-private in this handler.
6691     if (FirstPrivateDecls.count(Cap.getCapturedVar()))
6692       return MappableExprsHandler::OMP_MAP_PRIVATE |
6693              MappableExprsHandler::OMP_MAP_TO;
    // A reduction variable captured by reference will use only the
    // 'map to_from' flags.
6696     if (ReductionDecls.count(Cap.getCapturedVar())) {
6697       return MappableExprsHandler::OMP_MAP_TO |
6698              MappableExprsHandler::OMP_MAP_FROM;
6699     }
6700 
6701     // We didn't modify anything.
6702     return CurrentModifiers;
6703   }
6704 
6705 public:
6706   MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
6707       : CurDir(Dir), CGF(CGF) {
6708     // Extract firstprivate clause information.
6709     for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
6710       for (const Expr *D : C->varlists())
6711         FirstPrivateDecls.insert(
6712             cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
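    // Extract reduction clause information.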
6713     for (const auto *C : Dir.getClausesOfKind<OMPReductionClause>()) {
6714       for (const Expr *D : C->varlists()) {
6715         ReductionDecls.insert(
6716             cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
6717       }
6718     }
6719     // Extract device pointer clause information.
6720     for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
6721       for (const auto &L : C->component_lists())
6722         DevPointersMap[L.first].push_back(L.second);
6723   }
6724 
6725   /// Generate the base pointers, section pointers, sizes and map type
6726   /// bits for the provided map type, map modifier, and expression components.
  /// \a IsFirstComponentList should be set to true if the provided set of
6728   /// components is the first associated with a capture.
6729   void generateInfoForComponentList(
6730       OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
6731       OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
6732       MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
6733       MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
6734       bool IsFirstComponentList, bool IsImplicit) const {
6735 
6736     // The following summarizes what has to be generated for each map and the
6737     // types below. The generated information is expressed in this order:
6738     // base pointer, section pointer, size, flags
6739     // (to add to the ones that come from the map type and modifier).
6740     //
6741     // double d;
6742     // int i[100];
6743     // float *p;
6744     //
6745     // struct S1 {
6746     //   int i;
6747     //   float f[50];
6748     // }
6749     // struct S2 {
6750     //   int i;
6751     //   float f[50];
6752     //   S1 s;
6753     //   double *p;
6754     //   struct S2 *ps;
6755     // }
6756     // S2 s;
6757     // S2 *ps;
6758     //
6759     // map(d)
6760     // &d, &d, sizeof(double), noflags
6761     //
6762     // map(i)
6763     // &i, &i, 100*sizeof(int), noflags
6764     //
6765     // map(i[1:23])
6766     // &i(=&i[0]), &i[1], 23*sizeof(int), noflags
6767     //
6768     // map(p)
6769     // &p, &p, sizeof(float*), noflags
6770     //
6771     // map(p[1:24])
6772     // p, &p[1], 24*sizeof(float), noflags
6773     //
6774     // map(s)
6775     // &s, &s, sizeof(S2), noflags
6776     //
6777     // map(s.i)
6778     // &s, &(s.i), sizeof(int), noflags
6779     //
6780     // map(s.s.f)
    // &s, &(s.s.f[0]), 50*sizeof(float), noflags
6782     //
6783     // map(s.p)
6784     // &s, &(s.p), sizeof(double*), noflags
6785     //
6786     // map(s.p[:22], s.a s.b)
6787     // &s, &(s.p), sizeof(double*), noflags
6788     // &(s.p), &(s.p[0]), 22*sizeof(double), ptr_flag
6789     //
6790     // map(s.ps)
6791     // &s, &(s.ps), sizeof(S2*), noflags
6792     //
6793     // map(s.ps->s.i)
6794     // &s, &(s.ps), sizeof(S2*), noflags
6795     // &(s.ps), &(s.ps->s.i), sizeof(int), ptr_flag
6796     //
6797     // map(s.ps->ps)
6798     // &s, &(s.ps), sizeof(S2*), noflags
6799     // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
6800     //
6801     // map(s.ps->ps->ps)
6802     // &s, &(s.ps), sizeof(S2*), noflags
6803     // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
6804     // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), ptr_flag
6805     //
6806     // map(s.ps->ps->s.f[:22])
6807     // &s, &(s.ps), sizeof(S2*), noflags
6808     // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag
6809     // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), ptr_flag
6810     //
6811     // map(ps)
6812     // &ps, &ps, sizeof(S2*), noflags
6813     //
6814     // map(ps->i)
6815     // ps, &(ps->i), sizeof(int), noflags
6816     //
6817     // map(ps->s.f)
6818     // ps, &(ps->s.f[0]), 50*sizeof(float), noflags
6819     //
6820     // map(ps->p)
6821     // ps, &(ps->p), sizeof(double*), noflags
6822     //
6823     // map(ps->p[:22])
6824     // ps, &(ps->p), sizeof(double*), noflags
6825     // &(ps->p), &(ps->p[0]), 22*sizeof(double), ptr_flag
6826     //
6827     // map(ps->ps)
6828     // ps, &(ps->ps), sizeof(S2*), noflags
6829     //
6830     // map(ps->ps->s.i)
6831     // ps, &(ps->ps), sizeof(S2*), noflags
6832     // &(ps->ps), &(ps->ps->s.i), sizeof(int), ptr_flag
6833     //
6834     // map(ps->ps->ps)
6835     // ps, &(ps->ps), sizeof(S2*), noflags
6836     // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
6837     //
6838     // map(ps->ps->ps->ps)
6839     // ps, &(ps->ps), sizeof(S2*), noflags
6840     // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
6841     // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), ptr_flag
6842     //
6843     // map(ps->ps->ps->s.f[:22])
6844     // ps, &(ps->ps), sizeof(S2*), noflags
6845     // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag
6846     // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), ptr_flag
6847 
6848     // Track if the map information being generated is the first for a capture.
6849     bool IsCaptureFirstInfo = IsFirstComponentList;
6850     bool IsLink = false; // Is this variable a "declare target link"?
6851 
6852     // Scan the components from the base to the complete expression.
6853     auto CI = Components.rbegin();
6854     auto CE = Components.rend();
6855     auto I = CI;
6856 
6857     // Track if the map information being generated is the first for a list of
6858     // components.
6859     bool IsExpressionFirstInfo = true;
6860     llvm::Value *BP = nullptr;
6861 
6862     if (const auto *ME = dyn_cast<MemberExpr>(I->getAssociatedExpression())) {
6863       // The base is the 'this' pointer. The content of the pointer is going
6864       // to be the base of the field being mapped.
6865       BP = CGF.EmitScalarExpr(ME->getBase());
6866     } else {
6867       // The base is the reference to the variable.
6868       // BP = &Var.
6869       BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer();
6870       if (const auto *VD =
6871               dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
6872         if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
6873             isDeclareTargetDeclaration(VD))
6874           if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
6875             IsLink = true;
6876             BP = CGF.CGM.getOpenMPRuntime()
6877                      .getAddrOfDeclareTargetLink(VD)
6878                      .getPointer();
6879           }
6880       }
6881 
6882       // If the variable is a pointer and is being dereferenced (i.e. is not
6883       // the last component), the base has to be the pointer itself, not its
6884       // reference. References are ignored for mapping purposes.
6885       QualType Ty =
6886           I->getAssociatedDeclaration()->getType().getNonReferenceType();
6887       if (Ty->isAnyPointerType() && std::next(I) != CE) {
6888         LValue PtrAddr = CGF.MakeNaturalAlignAddrLValue(BP, Ty);
6889         BP = CGF.EmitLoadOfPointerLValue(PtrAddr.getAddress(),
6890                                          Ty->castAs<PointerType>())
6891                  .getPointer();
6892 
6893         // We do not need to generate individual map information for the
6894         // pointer, it can be associated with the combined storage.
6895         ++I;
6896       }
6897     }
6898 
6899     uint64_t DefaultFlags = IsImplicit ? OMP_MAP_IMPLICIT : 0;
6900     for (; I != CE; ++I) {
6901       auto Next = std::next(I);
6902 
6903       // We need to generate the addresses and sizes if this is the last
6904       // component, if the component is a pointer or if it is an array section
6905       // whose length can't be proved to be one. If this is a pointer, it
6906       // becomes the base address for the following components.
6907 
      // A final array section is one whose length can't be proved to be one.
6909       bool IsFinalArraySection =
6910           isFinalArraySectionExpression(I->getAssociatedExpression());
6911 
6912       // Get information on whether the element is a pointer. Have to do a
6913       // special treatment for array sections given that they are built-in
6914       // types.
6915       const auto *OASE =
6916           dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression());
6917       bool IsPointer =
6918           (OASE &&
6919            OMPArraySectionExpr::getBaseOriginalType(OASE)
6920                .getCanonicalType()
6921                ->isAnyPointerType()) ||
6922           I->getAssociatedExpression()->getType()->isAnyPointerType();
6923 
6924       if (Next == CE || IsPointer || IsFinalArraySection) {
6925         // If this is not the last component, we expect the pointer to be
6926         // associated with an array expression or member expression.
6927         assert((Next == CE ||
6928                 isa<MemberExpr>(Next->getAssociatedExpression()) ||
6929                 isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) ||
6930                 isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
6931                "Unexpected expression");
6932 
6933         llvm::Value *LB =
6934             CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer();
6935         llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
6936 
6937         // If we have a member expression and the current component is a
6938         // reference, we have to map the reference too. Whenever we have a
        // reference, the section that the reference refers to is going to be a
6940         // load instruction from the storage assigned to the reference.
6941         if (isa<MemberExpr>(I->getAssociatedExpression()) &&
6942             I->getAssociatedDeclaration()->getType()->isReferenceType()) {
6943           auto *LI = cast<llvm::LoadInst>(LB);
6944           llvm::Value *RefAddr = LI->getPointerOperand();
6945 
6946           BasePointers.push_back(BP);
6947           Pointers.push_back(RefAddr);
6948           Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
6949           Types.push_back(DefaultFlags |
6950                           getMapTypeBits(
6951                               /*MapType*/ OMPC_MAP_alloc,
6952                               /*MapTypeModifier=*/OMPC_MAP_unknown,
6953                               !IsExpressionFirstInfo, IsCaptureFirstInfo));
6954           IsExpressionFirstInfo = false;
6955           IsCaptureFirstInfo = false;
6956           // The reference will be the next base address.
6957           BP = RefAddr;
6958         }
6959 
6960         BasePointers.push_back(BP);
6961         Pointers.push_back(LB);
6962         Sizes.push_back(Size);
6963 
6964         // We need to add a pointer flag for each map that comes from the
6965         // same expression except for the first one. We also need to signal
6966         // this map is the first one that relates with the current capture
6967         // (there is a set of entries for each capture).
6968         Types.push_back(DefaultFlags |
6969                         getMapTypeBits(MapType, MapTypeModifier,
6970                                        !IsExpressionFirstInfo || IsLink,
6971                                        IsCaptureFirstInfo && !IsLink));
6972 
6973         // If we have a final array section, we are done with this expression.
6974         if (IsFinalArraySection)
6975           break;
6976 
6977         // The pointer becomes the base for the next element.
6978         if (Next != CE)
6979           BP = LB;
6980 
6981         IsExpressionFirstInfo = false;
6982         IsCaptureFirstInfo = false;
6983       }
6984     }
6985   }
6986 
6987   /// Generate all the base pointers, section pointers, sizes and map
6988   /// types for the extracted mappable expressions. Also, for each item that
6989   /// relates with a device pointer, a pair of the relevant declaration and
6990   /// index where it occurs is appended to the device pointers info array.
6991   void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
6992                        MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
6993                        MapFlagsArrayTy &Types) const {
6994     BasePointers.clear();
6995     Pointers.clear();
6996     Sizes.clear();
6997     Types.clear();
6998 
6999     struct MapInfo {
7000       /// Kind that defines how a device pointer has to be returned.
7001       enum ReturnPointerKind {
7002         // Don't have to return any pointer.
7003         RPK_None,
7004         // Pointer is the base of the declaration.
7005         RPK_Base,
7006         // Pointer is a member of the base declaration - 'this'
7007         RPK_Member,
7008         // Pointer is a reference and a member of the base declaration - 'this'
7009         RPK_MemberReference,
7010       };
7011       OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
7012       OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
7013       OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
7014       ReturnPointerKind ReturnDevicePointer = RPK_None;
7015       bool IsImplicit = false;
7016 
7017       MapInfo() = default;
7018       MapInfo(
7019           OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
7020           OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
7021           ReturnPointerKind ReturnDevicePointer, bool IsImplicit)
7022           : Components(Components), MapType(MapType),
7023             MapTypeModifier(MapTypeModifier),
7024             ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
7025     };
7026 
7027     // We have to process the component lists that relate with the same
7028     // declaration in a single chunk so that we can generate the map flags
7029     // correctly. Therefore, we organize all lists in a map.
7030     llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
7031 
7032     // Helper function to fill the information map for the different supported
7033     // clauses.
7034     auto &&InfoGen = [&Info](
7035         const ValueDecl *D,
7036         OMPClauseMappableExprCommon::MappableExprComponentListRef L,
7037         OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
7038         MapInfo::ReturnPointerKind ReturnDevicePointer, bool IsImplicit) {
7039       const ValueDecl *VD =
7040           D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
7041       Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
7042                             IsImplicit);
7043     };
7044 
7045     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7046     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
7047       for (const auto &L : C->component_lists()) {
7048         InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
7049                 MapInfo::RPK_None, C->isImplicit());
7050       }
7051     for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
7052       for (const auto &L : C->component_lists()) {
7053         InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
7054                 MapInfo::RPK_None, C->isImplicit());
7055       }
7056     for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
7057       for (const auto &L : C->component_lists()) {
7058         InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
7059                 MapInfo::RPK_None, C->isImplicit());
7060       }
7061 
7062     // Look at the use_device_ptr clause information and mark the existing map
7063     // entries as such. If there is no map information for an entry in the
    // use_device_ptr list, we create one with map type 'alloc' and a
    // zero-size section. It is the user's fault if that entry was not mapped
    // before.
7066     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7067     for (const auto *C : this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>())
7068       for (const auto &L : C->component_lists()) {
7069         assert(!L.second.empty() && "Not expecting empty list of components!");
7070         const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
7071         VD = cast<ValueDecl>(VD->getCanonicalDecl());
7072         const Expr *IE = L.second.back().getAssociatedExpression();
        // If the first component is a member expression, we have to look up
        // the entry for 'this', which is keyed by null in the map information.
        // Otherwise look up the declaration directly.
7076         auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
7077 
7078         // We potentially have map information for this declaration already.
7079         // Look for the first set of components that refer to it.
7080         if (It != Info.end()) {
7081           auto CI = std::find_if(
7082               It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
7083                 return MI.Components.back().getAssociatedDeclaration() == VD;
7084               });
7085           // If we found a map entry, signal that the pointer has to be returned
7086           // and move on to the next declaration.
7087           if (CI != It->second.end()) {
7088             CI->ReturnDevicePointer = isa<MemberExpr>(IE)
7089                                           ? (VD->getType()->isReferenceType()
7090                                                  ? MapInfo::RPK_MemberReference
7091                                                  : MapInfo::RPK_Member)
7092                                           : MapInfo::RPK_Base;
7093             continue;
7094           }
7095         }
7096 
7097         // We didn't find any match in our map information - generate a zero
7098         // size array section.
7099         // FIXME: MSVC 2013 seems to require this-> to find member CGF.
7100         llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(IE),
7101                                                       IE->getExprLoc());
7102         BasePointers.push_back({Ptr, VD});
7103         Pointers.push_back(Ptr);
7104         Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
7105         Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
7106       }
7107 
7108     for (const auto &M : Info) {
7109       // We need to know when we generate information for the first component
7110       // associated with a capture, because the mapping flags depend on it.
7111       bool IsFirstComponentList = true;
7112       for (const MapInfo &L : M.second) {
7113         assert(!L.Components.empty() &&
7114                "Not expecting declaration with no component lists.");
7115 
7116         // Remember the current base pointer index.
7117         unsigned CurrentBasePointersIdx = BasePointers.size();
7118         // FIXME: MSVC 2013 seems to require this-> to find the member method.
7119         this->generateInfoForComponentList(
7120             L.MapType, L.MapTypeModifier, L.Components, BasePointers, Pointers,
7121             Sizes, Types, IsFirstComponentList, L.IsImplicit);
7122 
        // If this entry relates to a device pointer, set the relevant
        // declaration and add the 'return pointer' flag.
7125         if (IsFirstComponentList &&
7126             L.ReturnDevicePointer != MapInfo::RPK_None) {
7127           // If the pointer is not the base of the map, we need to skip the
7128           // base. If it is a reference in a member field, we also need to skip
7129           // the map of the reference.
7130           if (L.ReturnDevicePointer != MapInfo::RPK_Base) {
7131             ++CurrentBasePointersIdx;
7132             if (L.ReturnDevicePointer == MapInfo::RPK_MemberReference)
7133               ++CurrentBasePointersIdx;
7134           }
7135           assert(BasePointers.size() > CurrentBasePointersIdx &&
7136                  "Unexpected number of mapped base pointers.");
7137 
7138           const ValueDecl *RelevantVD =
7139               L.Components.back().getAssociatedDeclaration();
          assert(RelevantVD &&
                 "No relevant declaration related to the device pointer??");
7142 
7143           BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
7144           Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
7145         }
7146         IsFirstComponentList = false;
7147       }
7148     }
7149   }
7150 
7151   /// Generate the base pointers, section pointers, sizes and map types
  /// associated with a given capture.
7153   void generateInfoForCapture(const CapturedStmt::Capture *Cap,
7154                               llvm::Value *Arg,
7155                               MapBaseValuesArrayTy &BasePointers,
7156                               MapValuesArrayTy &Pointers,
7157                               MapValuesArrayTy &Sizes,
7158                               MapFlagsArrayTy &Types) const {
7159     assert(!Cap->capturesVariableArrayType() &&
7160            "Not expecting to generate map info for a variable array type!");
7161 
7162     BasePointers.clear();
7163     Pointers.clear();
7164     Sizes.clear();
7165     Types.clear();
7166 
    // We need to know when we are generating information for the first
    // component associated with a capture, because the mapping flags depend
    // on it.
7169     bool IsFirstComponentList = true;
7170 
7171     const ValueDecl *VD =
7172         Cap->capturesThis()
7173             ? nullptr
7174             : Cap->getCapturedVar()->getCanonicalDecl();
7175 
    // If this declaration appears in an is_device_ptr clause, we just have to
    // pass the pointer by value. If it is a reference to a declaration, we
    // just pass its value; otherwise, if it is a member expression, we need to
    // map 'to' the field.
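    // E.g. (illustrative only), for '#pragma omp target is_device_ptr(p)' with
    // a captured pointer 'p', we simply forward the captured value below as a
    // literal target parameter instead of creating a new mapping for it.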
7180     if (!VD) {
7181       auto It = DevPointersMap.find(VD);
7182       if (It != DevPointersMap.end()) {
7183         for (ArrayRef<OMPClauseMappableExprCommon::MappableComponent> L :
7184              It->second) {
7185           generateInfoForComponentList(
7186               /*MapType=*/OMPC_MAP_to, /*MapTypeModifier=*/OMPC_MAP_unknown, L,
7187               BasePointers, Pointers, Sizes, Types, IsFirstComponentList,
7188               /*IsImplicit=*/false);
7189           IsFirstComponentList = false;
7190         }
7191         return;
7192       }
7193     } else if (DevPointersMap.count(VD)) {
7194       BasePointers.emplace_back(Arg, VD);
7195       Pointers.push_back(Arg);
7196       Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
7197       Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
7198       return;
7199     }
7200 
7201     // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
7202     for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
7203       for (const auto &L : C->decl_component_lists(VD)) {
7204         assert(L.first == VD &&
7205                "We got information for the wrong declaration??");
7206         assert(!L.second.empty() &&
7207                "Not expecting declaration with no component lists.");
7208         generateInfoForComponentList(
7209             C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
7210             Pointers, Sizes, Types, IsFirstComponentList, C->isImplicit());
7211         IsFirstComponentList = false;
7212       }
7213 
7214     return;
7215   }
7216 
7217   /// Generate the default map information for a given capture \a CI,
7218   /// record field declaration \a RI and captured value \a CV.
7219   void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
7220                               const FieldDecl &RI, llvm::Value *CV,
7221                               MapBaseValuesArrayTy &CurBasePointers,
7222                               MapValuesArrayTy &CurPointers,
7223                               MapValuesArrayTy &CurSizes,
7224                               MapFlagsArrayTy &CurMapTypes) {
7225 
7226     // Do the default mapping.
7227     if (CI.capturesThis()) {
7228       CurBasePointers.push_back(CV);
7229       CurPointers.push_back(CV);
7230       const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
7231       CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
7232       // Default map type.
7233       CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
7234     } else if (CI.capturesVariableByCopy()) {
7235       CurBasePointers.push_back(CV);
7236       CurPointers.push_back(CV);
7237       if (!RI.getType()->isAnyPointerType()) {
        // We have to signal to the runtime that this capture is passed by
        // value and is not a pointer.
7240         CurMapTypes.push_back(OMP_MAP_LITERAL);
7241         CurSizes.push_back(CGF.getTypeSize(RI.getType()));
7242       } else {
7243         // Pointers are implicitly mapped with a zero size and no flags
7244         // (other than first map that is added for all implicit maps).
7245         CurMapTypes.push_back(0u);
7246         CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
7247       }
7248     } else {
7249       assert(CI.capturesVariable() && "Expected captured reference.");
7250       CurBasePointers.push_back(CV);
7251       CurPointers.push_back(CV);
7252 
7253       const auto *PtrTy = cast<ReferenceType>(RI.getType().getTypePtr());
7254       QualType ElementType = PtrTy->getPointeeType();
7255       CurSizes.push_back(CGF.getTypeSize(ElementType));
7256       // The default map type for a scalar/complex type is 'to' because by
7257       // default the value doesn't have to be retrieved. For an aggregate
7258       // type, the default is 'tofrom'.
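      // For example (illustrative only), an 'int x' captured by reference gets
      // roughly the entry {&x, &x, sizeof(int), OMP_MAP_TO}, while a captured
      // struct would use OMP_MAP_TO | OMP_MAP_FROM; the target-parameter flag
      // is added below in both cases.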
7259       CurMapTypes.emplace_back(adjustMapModifiersForPrivateClauses(
7260           CI, ElementType->isAggregateType() ? (OMP_MAP_TO | OMP_MAP_FROM)
7261                                              : OMP_MAP_TO));
7262     }
7263     // Every default map produces a single argument which is a target parameter.
7264     CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
7265   }
7266 };
7267 
7268 enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID used when the device was not specified; the runtime should
  /// obtain it from the environment variables described in the spec.
7271   OMP_DEVICEID_UNDEF = -1,
7272 };
7273 } // anonymous namespace
7274 
7275 /// Emit the arrays used to pass the captures and map information to the
7276 /// offloading runtime library. If there is no map or capture information,
7277 /// return nullptr by reference.
7278 static void
7279 emitOffloadingArrays(CodeGenFunction &CGF,
7280                      MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
7281                      MappableExprsHandler::MapValuesArrayTy &Pointers,
7282                      MappableExprsHandler::MapValuesArrayTy &Sizes,
7283                      MappableExprsHandler::MapFlagsArrayTy &MapTypes,
7284                      CGOpenMPRuntime::TargetDataInfo &Info) {
7285   CodeGenModule &CGM = CGF.CGM;
7286   ASTContext &Ctx = CGF.getContext();
7287 
7288   // Reset the array information.
7289   Info.clearArrayInfo();
7290   Info.NumberOfPtrs = BasePointers.size();
7291 
7292   if (Info.NumberOfPtrs) {
    // Detect whether any capture size requires runtime evaluation; if none
    // does, a constant array can eventually be used for the sizes.
7295     bool hasRuntimeEvaluationCaptureSize = false;
7296     for (llvm::Value *S : Sizes)
7297       if (!isa<llvm::Constant>(S)) {
7298         hasRuntimeEvaluationCaptureSize = true;
7299         break;
7300       }
7301 
7302     llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
7303     QualType PointerArrayType =
7304         Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
7305                                  /*IndexTypeQuals=*/0);
7306 
7307     Info.BasePointersArray =
7308         CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
7309     Info.PointersArray =
7310         CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
7311 
    // If we don't have any VLA types or other types that require runtime
    // evaluation, we can use a constant array for the map sizes; otherwise we
    // need to fill the sizes array at run time, as we do for the pointers.
7315     if (hasRuntimeEvaluationCaptureSize) {
7316       QualType SizeArrayType = Ctx.getConstantArrayType(
7317           Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
7318           /*IndexTypeQuals=*/0);
7319       Info.SizesArray =
7320           CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
7321     } else {
7322       // We expect all the sizes to be constant, so we collect them to create
7323       // a constant array.
7324       SmallVector<llvm::Constant *, 16> ConstSizes;
7325       for (llvm::Value *S : Sizes)
7326         ConstSizes.push_back(cast<llvm::Constant>(S));
7327 
7328       auto *SizesArrayInit = llvm::ConstantArray::get(
7329           llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
7330       std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
7331       auto *SizesArrayGbl = new llvm::GlobalVariable(
7332           CGM.getModule(), SizesArrayInit->getType(),
7333           /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
7334           SizesArrayInit, Name);
7335       SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
7336       Info.SizesArray = SizesArrayGbl;
7337     }
7338 
    // The map types are always constant, so we don't need to generate code to
    // fill the arrays. Instead, we create an array constant.
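    // E.g. (illustrative only), two default 'tofrom' captures produce a global
    // like '@.offload_maptypes = private unnamed_addr constant [2 x i64]'
    // whose elements encode OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_TARGET_PARAM.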
7341     llvm::Constant *MapTypesArrayInit =
7342         llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
7343     std::string MaptypesName =
7344         CGM.getOpenMPRuntime().getName({"offload_maptypes"});
7345     auto *MapTypesArrayGbl = new llvm::GlobalVariable(
7346         CGM.getModule(), MapTypesArrayInit->getType(),
7347         /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
7348         MapTypesArrayInit, MaptypesName);
7349     MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
7350     Info.MapTypesArray = MapTypesArrayGbl;
7351 
7352     for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
7353       llvm::Value *BPVal = *BasePointers[I];
7354       llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
7355           llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7356           Info.BasePointersArray, 0, I);
7357       BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
7358           BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
7359       Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
7360       CGF.Builder.CreateStore(BPVal, BPAddr);
7361 
7362       if (Info.requiresDevicePointerInfo())
7363         if (const ValueDecl *DevVD = BasePointers[I].getDevicePtrDecl())
7364           Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
7365 
7366       llvm::Value *PVal = Pointers[I];
7367       llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
7368           llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7369           Info.PointersArray, 0, I);
7370       P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
7371           P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
7372       Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
7373       CGF.Builder.CreateStore(PVal, PAddr);
7374 
7375       if (hasRuntimeEvaluationCaptureSize) {
7376         llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
7377             llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
7378             Info.SizesArray,
7379             /*Idx0=*/0,
7380             /*Idx1=*/I);
7381         Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
7382         CGF.Builder.CreateStore(
7383             CGF.Builder.CreateIntCast(Sizes[I], CGM.SizeTy, /*isSigned=*/true),
7384             SAddr);
7385       }
7386     }
7387   }
7388 }
7389 /// Emit the arguments to be passed to the runtime library based on the
7390 /// arrays of pointers, sizes and map types.
7391 static void emitOffloadingArraysArgument(
7392     CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
7393     llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
7394     llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
7395   CodeGenModule &CGM = CGF.CGM;
7396   if (Info.NumberOfPtrs) {
7397     BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7398         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7399         Info.BasePointersArray,
7400         /*Idx0=*/0, /*Idx1=*/0);
7401     PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7402         llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
7403         Info.PointersArray,
7404         /*Idx0=*/0,
7405         /*Idx1=*/0);
7406     SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7407         llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
7408         /*Idx0=*/0, /*Idx1=*/0);
7409     MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
7410         llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
7411         Info.MapTypesArray,
7412         /*Idx0=*/0,
7413         /*Idx1=*/0);
7414   } else {
7415     BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7416     PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
7417     SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
7418     MapTypesArrayArg =
7419         llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
7420   }
7421 }
7422 
7423 void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
7424                                      const OMPExecutableDirective &D,
7425                                      llvm::Value *OutlinedFn,
7426                                      llvm::Value *OutlinedFnID,
7427                                      const Expr *IfCond, const Expr *Device) {
7428   if (!CGF.HaveInsertPoint())
7429     return;
7430 
7431   assert(OutlinedFn && "Invalid outlined function!");
7432 
7433   const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
7434   llvm::SmallVector<llvm::Value *, 16> CapturedVars;
7435   const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
7436   auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
7437                                             PrePostActionTy &) {
7438     CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
7439   };
7440   emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen);
7441 
7442   CodeGenFunction::OMPTargetDataInfo InputInfo;
7443   llvm::Value *MapTypesArray = nullptr;
7444   // Fill up the pointer arrays and transfer execution to the device.
7445   auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
7446                     &MapTypesArray, &CS, RequiresOuterTask,
7447                     &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) {
    // On top of the arrays that were filled up, the target offloading call
    // takes as arguments the device id as well as the host pointer. The host
    // pointer is used by the runtime library to identify the current target
    // region, so it only has to be unique and not necessarily point to
    // anything. It could be the pointer to the outlined function that
    // implements the target region, but we aren't using that, so the compiler
    // doesn't need to keep it around and can therefore inline the host
    // function if that proves worthwhile during optimization.
7456 
7457     // From this point on, we need to have an ID of the target region defined.
7458     assert(OutlinedFnID && "Invalid outlined function ID!");
7459 
7460     // Emit device ID if any.
7461     llvm::Value *DeviceID;
7462     if (Device) {
7463       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
7464                                            CGF.Int64Ty, /*isSigned=*/true);
7465     } else {
7466       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
7467     }
7468 
7469     // Emit the number of elements in the offloading arrays.
7470     llvm::Value *PointerNum =
7471         CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
7472 
7473     // Return value of the runtime offloading call.
7474     llvm::Value *Return;
7475 
7476     llvm::Value *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D);
7477     llvm::Value *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D);
7478 
7479     bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
7480     // The target region is an outlined function launched by the runtime
    // via calls to __tgt_target() or __tgt_target_teams().
7482     //
7483     // __tgt_target() launches a target region with one team and one thread,
7484     // executing a serial region.  This master thread may in turn launch
7485     // more threads within its team upon encountering a parallel region,
7486     // however, no additional teams can be launched on the device.
7487     //
7488     // __tgt_target_teams() launches a target region with one or more teams,
7489     // each with one or more threads.  This call is required for target
7490     // constructs such as:
7491     //  'target teams'
7492     //  'target' / 'teams'
7493     //  'target teams distribute parallel for'
7494     //  'target parallel'
7495     // and so on.
7496     //
    // Note that on the host and CPU targets, the runtime implementation of
    // these calls simply calls the outlined function without forking threads.
7499     // The outlined functions themselves have runtime calls to
7500     // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
7501     // the compiler in emitTeamsCall() and emitParallelCall().
7502     //
7503     // In contrast, on the NVPTX target, the implementation of
7504     // __tgt_target_teams() launches a GPU kernel with the requested number
7505     // of teams and threads so no additional calls to the runtime are required.
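    // Illustrative sketch (not the only possible form): for
    //   #pragma omp target teams num_teams(4)
    // the emitted call is roughly
    //   __tgt_target_teams(device_id, region_id, n, baseptrs, ptrs, sizes,
    //                      maptypes, /*num_teams=*/4, /*thread_limit=*/0)
    // while a plain '#pragma omp target' takes the __tgt_target path below,
    // which omits the last two arguments.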
7506     if (NumTeams) {
      // If we have NumTeams defined, this means that we have an enclosed teams
      // region. Therefore we also expect to have NumThreads defined. These two
      // values should be defined in the presence of a teams directive,
      // regardless of whether any clauses are associated with it. If the user
      // uses teams with no clauses, these two values will be the defaults that
      // should be passed to the runtime library - a 32-bit integer with the
      // value zero.
7513       assert(NumThreads && "Thread limit expression should be available along "
7514                            "with number of teams.");
7515       llvm::Value *OffloadingArgs[] = {DeviceID,
7516                                        OutlinedFnID,
7517                                        PointerNum,
7518                                        InputInfo.BasePointersArray.getPointer(),
7519                                        InputInfo.PointersArray.getPointer(),
7520                                        InputInfo.SizesArray.getPointer(),
7521                                        MapTypesArray,
7522                                        NumTeams,
7523                                        NumThreads};
7524       Return = CGF.EmitRuntimeCall(
7525           createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait
7526                                           : OMPRTL__tgt_target_teams),
7527           OffloadingArgs);
7528     } else {
7529       llvm::Value *OffloadingArgs[] = {DeviceID,
7530                                        OutlinedFnID,
7531                                        PointerNum,
7532                                        InputInfo.BasePointersArray.getPointer(),
7533                                        InputInfo.PointersArray.getPointer(),
7534                                        InputInfo.SizesArray.getPointer(),
7535                                        MapTypesArray};
7536       Return = CGF.EmitRuntimeCall(
7537           createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait
7538                                           : OMPRTL__tgt_target),
7539           OffloadingArgs);
7540     }
7541 
7542     // Check the error code and execute the host version if required.
7543     llvm::BasicBlock *OffloadFailedBlock =
7544         CGF.createBasicBlock("omp_offload.failed");
7545     llvm::BasicBlock *OffloadContBlock =
7546         CGF.createBasicBlock("omp_offload.cont");
7547     llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
7548     CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
7549 
7550     CGF.EmitBlock(OffloadFailedBlock);
7551     if (RequiresOuterTask) {
7552       CapturedVars.clear();
7553       CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
7554     }
7555     emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
7556     CGF.EmitBranch(OffloadContBlock);
7557 
7558     CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
7559   };
7560 
7561   // Notify that the host version must be executed.
7562   auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
7563                     RequiresOuterTask](CodeGenFunction &CGF,
7564                                        PrePostActionTy &) {
7565     if (RequiresOuterTask) {
7566       CapturedVars.clear();
7567       CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
7568     }
7569     emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
7570   };
7571 
7572   auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
7573                           &CapturedVars, RequiresOuterTask,
7574                           &CS](CodeGenFunction &CGF, PrePostActionTy &) {
7575     // Fill up the arrays with all the captured variables.
7576     MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
7577     MappableExprsHandler::MapValuesArrayTy Pointers;
7578     MappableExprsHandler::MapValuesArrayTy Sizes;
7579     MappableExprsHandler::MapFlagsArrayTy MapTypes;
7580 
7581     MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
7582     MappableExprsHandler::MapValuesArrayTy CurPointers;
7583     MappableExprsHandler::MapValuesArrayTy CurSizes;
7584     MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
7585 
7586     // Get mappable expression information.
7587     MappableExprsHandler MEHandler(D, CGF);
7588 
7589     auto RI = CS.getCapturedRecordDecl()->field_begin();
7590     auto CV = CapturedVars.begin();
7591     for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
7592                                               CE = CS.capture_end();
7593          CI != CE; ++CI, ++RI, ++CV) {
7594       CurBasePointers.clear();
7595       CurPointers.clear();
7596       CurSizes.clear();
7597       CurMapTypes.clear();
7598 
7599       // VLA sizes are passed to the outlined region by copy and do not have map
7600       // information associated.
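      // E.g. (illustrative only), for a VLA 'int a[n]' the captured bound 'n'
      // is forwarded by value as a literal argument here, while 'a' itself is
      // mapped through the regular map machinery below.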
7601       if (CI->capturesVariableArrayType()) {
7602         CurBasePointers.push_back(*CV);
7603         CurPointers.push_back(*CV);
7604         CurSizes.push_back(CGF.getTypeSize(RI->getType()));
7605         // Copy to the device as an argument. No need to retrieve it.
7606         CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
7607                               MappableExprsHandler::OMP_MAP_TARGET_PARAM);
7608       } else {
        // If we have any information in the map clause, we use it; otherwise
        // we just do a default mapping.
7611         MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
7612                                          CurSizes, CurMapTypes);
7613         if (CurBasePointers.empty())
7614           MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
7615                                            CurPointers, CurSizes, CurMapTypes);
7616       }
      // We expect to have at least one element of information for this capture.
7618       assert(!CurBasePointers.empty() &&
7619              "Non-existing map pointer for capture!");
7620       assert(CurBasePointers.size() == CurPointers.size() &&
7621              CurBasePointers.size() == CurSizes.size() &&
7622              CurBasePointers.size() == CurMapTypes.size() &&
7623              "Inconsistent map information sizes!");
7624 
7625       // We need to append the results of this capture to what we already have.
7626       BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
7627       Pointers.append(CurPointers.begin(), CurPointers.end());
7628       Sizes.append(CurSizes.begin(), CurSizes.end());
7629       MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
7630     }
7631     // Map other list items in the map clause which are not captured variables
7632     // but "declare target link" global variables.
7633     for (const auto *C : D.getClausesOfKind<OMPMapClause>()) {
7634       for (const auto &L : C->component_lists()) {
7635         if (!L.first)
7636           continue;
7637         const auto *VD = dyn_cast<VarDecl>(L.first);
7638         if (!VD)
7639           continue;
7640         llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7641             isDeclareTargetDeclaration(VD);
7642         if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
7643           continue;
7644         MEHandler.generateInfoForComponentList(
7645             C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
7646             Pointers, Sizes, MapTypes, /*IsFirstComponentList=*/true,
7647             C->isImplicit());
7648       }
7649     }
7650 
7651     TargetDataInfo Info;
7652     // Fill up the arrays and create the arguments.
7653     emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
7654     emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
7655                                  Info.PointersArray, Info.SizesArray,
7656                                  Info.MapTypesArray, Info);
7657     InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
7658     InputInfo.BasePointersArray =
7659         Address(Info.BasePointersArray, CGM.getPointerAlign());
7660     InputInfo.PointersArray =
7661         Address(Info.PointersArray, CGM.getPointerAlign());
7662     InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
7663     MapTypesArray = Info.MapTypesArray;
7664     if (RequiresOuterTask)
7665       CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
7666     else
7667       emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
7668   };
7669 
7670   auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
7671                              CodeGenFunction &CGF, PrePostActionTy &) {
7672     if (RequiresOuterTask) {
7673       CodeGenFunction::OMPTargetDataInfo InputInfo;
7674       CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
7675     } else {
7676       emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
7677     }
7678   };
7679 
  // If we have a target function ID, it means that we need to support
  // offloading; otherwise, just execute on the host. We need to execute on the
  // host regardless of the 'if' clause condition if, e.g., the user does not
  // specify any target triples.
7684   if (OutlinedFnID) {
7685     if (IfCond) {
7686       emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
7687     } else {
7688       RegionCodeGenTy ThenRCG(TargetThenGen);
7689       ThenRCG(CGF);
7690     }
7691   } else {
7692     RegionCodeGenTy ElseRCG(TargetElseGen);
7693     ElseRCG(CGF);
7694   }
7695 }
7696 
7697 void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
7698                                                     StringRef ParentName) {
7699   if (!S)
7700     return;
7701 
7702   // Codegen OMP target directives that offload compute to the device.
7703   bool RequiresDeviceCodegen =
7704       isa<OMPExecutableDirective>(S) &&
7705       isOpenMPTargetExecutionDirective(
7706           cast<OMPExecutableDirective>(S)->getDirectiveKind());
7707 
7708   if (RequiresDeviceCodegen) {
7709     const auto &E = *cast<OMPExecutableDirective>(S);
7710     unsigned DeviceID;
7711     unsigned FileID;
7712     unsigned Line;
7713     getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID,
7714                              FileID, Line);
7715 
    // Is this a target region that should not be emitted as an entry point? If
    // so, just signal that we are done with this target region.
7718     if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
7719                                                             ParentName, Line))
7720       return;
7721 
7722     switch (E.getDirectiveKind()) {
7723     case OMPD_target:
7724       CodeGenFunction::EmitOMPTargetDeviceFunction(CGM, ParentName,
7725                                                    cast<OMPTargetDirective>(E));
7726       break;
7727     case OMPD_target_parallel:
7728       CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
7729           CGM, ParentName, cast<OMPTargetParallelDirective>(E));
7730       break;
7731     case OMPD_target_teams:
7732       CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
7733           CGM, ParentName, cast<OMPTargetTeamsDirective>(E));
7734       break;
7735     case OMPD_target_teams_distribute:
7736       CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
7737           CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(E));
7738       break;
7739     case OMPD_target_teams_distribute_simd:
7740       CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
7741           CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(E));
7742       break;
7743     case OMPD_target_parallel_for:
7744       CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
7745           CGM, ParentName, cast<OMPTargetParallelForDirective>(E));
7746       break;
7747     case OMPD_target_parallel_for_simd:
7748       CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
7749           CGM, ParentName, cast<OMPTargetParallelForSimdDirective>(E));
7750       break;
7751     case OMPD_target_simd:
7752       CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
7753           CGM, ParentName, cast<OMPTargetSimdDirective>(E));
7754       break;
7755     case OMPD_target_teams_distribute_parallel_for:
7756       CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
7757           CGM, ParentName,
7758           cast<OMPTargetTeamsDistributeParallelForDirective>(E));
7759       break;
7760     case OMPD_target_teams_distribute_parallel_for_simd:
7761       CodeGenFunction::
7762           EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
7763               CGM, ParentName,
7764               cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
7765       break;
7766     case OMPD_parallel:
7767     case OMPD_for:
7768     case OMPD_parallel_for:
7769     case OMPD_parallel_sections:
7770     case OMPD_for_simd:
7771     case OMPD_parallel_for_simd:
7772     case OMPD_cancel:
7773     case OMPD_cancellation_point:
7774     case OMPD_ordered:
7775     case OMPD_threadprivate:
7776     case OMPD_task:
7777     case OMPD_simd:
7778     case OMPD_sections:
7779     case OMPD_section:
7780     case OMPD_single:
7781     case OMPD_master:
7782     case OMPD_critical:
7783     case OMPD_taskyield:
7784     case OMPD_barrier:
7785     case OMPD_taskwait:
7786     case OMPD_taskgroup:
7787     case OMPD_atomic:
7788     case OMPD_flush:
7789     case OMPD_teams:
7790     case OMPD_target_data:
7791     case OMPD_target_exit_data:
7792     case OMPD_target_enter_data:
7793     case OMPD_distribute:
7794     case OMPD_distribute_simd:
7795     case OMPD_distribute_parallel_for:
7796     case OMPD_distribute_parallel_for_simd:
7797     case OMPD_teams_distribute:
7798     case OMPD_teams_distribute_simd:
7799     case OMPD_teams_distribute_parallel_for:
7800     case OMPD_teams_distribute_parallel_for_simd:
7801     case OMPD_target_update:
7802     case OMPD_declare_simd:
7803     case OMPD_declare_target:
7804     case OMPD_end_declare_target:
7805     case OMPD_declare_reduction:
7806     case OMPD_taskloop:
7807     case OMPD_taskloop_simd:
7808     case OMPD_unknown:
7809       llvm_unreachable("Unknown target directive for OpenMP device codegen.");
7810     }
7811     return;
7812   }
7813 
7814   if (const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
7815     if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
7816       return;
7817 
7818     scanForTargetRegionsFunctions(
7819         E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName);
7820     return;
7821   }
7822 
7823   // If this is a lambda function, look into its body.
7824   if (const auto *L = dyn_cast<LambdaExpr>(S))
7825     S = L->getBody();
7826 
7827   // Keep looking for target regions recursively.
7828   for (const Stmt *II : S->children())
7829     scanForTargetRegionsFunctions(II, ParentName);
7830 }
7831 
7832 bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
7833   const auto *FD = cast<FunctionDecl>(GD.getDecl());
7834 
7835   // If emitting code for the host, we do not process FD here. Instead we do
7836   // the normal code generation.
7837   if (!CGM.getLangOpts().OpenMPIsDevice)
7838     return false;
7839 
7840   // Try to detect target regions in the function.
7841   scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
7842 
  // Do not emit the function if it is not marked as declare target.
7844   return !isDeclareTargetDeclaration(FD) &&
7845          AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
7846 }
7847 
7848 bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
7849   if (!CGM.getLangOpts().OpenMPIsDevice)
7850     return false;
7851 
7852   // Check if there are Ctors/Dtors in this declaration and look for target
7853   // regions in it. We use the complete variant to produce the kernel name
7854   // mangling.
7855   QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
7856   if (const auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
7857     for (const CXXConstructorDecl *Ctor : RD->ctors()) {
7858       StringRef ParentName =
7859           CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
7860       scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
7861     }
7862     if (const CXXDestructorDecl *Dtor = RD->getDestructor()) {
7863       StringRef ParentName =
7864           CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
7865       scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
7866     }
7867   }
7868 
  // Do not emit the variable if it is not marked as declare target.
7870   llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7871       isDeclareTargetDeclaration(cast<VarDecl>(GD.getDecl()));
7872   return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
7873 }
7874 
7875 void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
7876                                                    llvm::Constant *Addr) {
7877   if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7878           isDeclareTargetDeclaration(VD)) {
7879     OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
7880     StringRef VarName;
7881     CharUnits VarSize;
7882     llvm::GlobalValue::LinkageTypes Linkage;
7883     switch (*Res) {
7884     case OMPDeclareTargetDeclAttr::MT_To:
7885       Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
7886       VarName = CGM.getMangledName(VD);
7887       VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
7888       Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
7889       break;
7890     case OMPDeclareTargetDeclAttr::MT_Link:
7891       // Map type 'to' because we do not map the original variable but the
7892       // reference.
7893       Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
7894       if (!CGM.getLangOpts().OpenMPIsDevice) {
7895         Addr =
7896             cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
7897       }
7898       VarName = Addr->getName();
7899       VarSize = CGM.getPointerSize();
7900       Linkage = llvm::GlobalValue::WeakAnyLinkage;
7901       break;
7902     }
7903     OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
7904         VarName, Addr, VarSize, Flags, Linkage);
7905   }
7906 }
7907 
7908 bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
7909   if (isa<FunctionDecl>(GD.getDecl()))
7910     return emitTargetFunctions(GD);
7911 
7912   return emitTargetGlobalVariable(GD);
7913 }
7914 
7915 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
7916     CodeGenModule &CGM)
7917     : CGM(CGM) {
7918   if (CGM.getLangOpts().OpenMPIsDevice) {
7919     SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
7920     CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
7921   }
7922 }
7923 
7924 CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
7925   if (CGM.getLangOpts().OpenMPIsDevice)
7926     CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
7927 }
7928 
7929 bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
7930   if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
7931     return true;
7932 
7933   const auto *D = cast<FunctionDecl>(GD.getDecl());
7934   const FunctionDecl *FD = D->getCanonicalDecl();
  // Do not emit the function if it is marked as declare target, as it was
  // already emitted.
7937   if (isDeclareTargetDeclaration(D)) {
7938     if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
7939       if (auto *F = dyn_cast_or_null<llvm::Function>(
7940               CGM.GetGlobalValue(CGM.getMangledName(GD))))
7941         return !F->isDeclaration();
7942       return false;
7943     }
7944     return true;
7945   }
7946 
7947   return !AlreadyEmittedTargetFunctions.insert(FD).second;
7948 }
7949 
7950 llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
7951   // If we have offloading in the current module, we need to emit the entries
7952   // now and register the offloading descriptor.
7953   createOffloadEntriesAndInfoMetadata();
7954 
7955   // Create and register the offloading binary descriptors. This is the main
7956   // entity that captures all the information about offloading in the current
7957   // compilation unit.
7958   return createOffloadingBinaryDescriptorRegistration();
7959 }
7960 
7961 void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF,
7962                                     const OMPExecutableDirective &D,
7963                                     SourceLocation Loc,
7964                                     llvm::Value *OutlinedFn,
7965                                     ArrayRef<llvm::Value *> CapturedVars) {
7966   if (!CGF.HaveInsertPoint())
7967     return;
7968 
7969   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
7970   CodeGenFunction::RunCleanupsScope Scope(CGF);
7971 
7972   // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn);
7973   llvm::Value *Args[] = {
7974       RTLoc,
7975       CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars
7976       CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())};
7977   llvm::SmallVector<llvm::Value *, 16> RealArgs;
7978   RealArgs.append(std::begin(Args), std::end(Args));
7979   RealArgs.append(CapturedVars.begin(), CapturedVars.end());
7980 
7981   llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams);
7982   CGF.EmitRuntimeCall(RTLFn, RealArgs);
7983 }
7984 
7985 void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
7986                                          const Expr *NumTeams,
7987                                          const Expr *ThreadLimit,
7988                                          SourceLocation Loc) {
7989   if (!CGF.HaveInsertPoint())
7990     return;
7991 
7992   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
7993 
7994   llvm::Value *NumTeamsVal =
7995       NumTeams
7996           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams),
7997                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
7998           : CGF.Builder.getInt32(0);
7999 
8000   llvm::Value *ThreadLimitVal =
8001       ThreadLimit
8002           ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
8003                                       CGF.CGM.Int32Ty, /* isSigned = */ true)
8004           : CGF.Builder.getInt32(0);
8005 
  // Build call __kmpc_push_num_teams(&loc, global_tid, num_teams, thread_limit)
8007   llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal,
8008                                      ThreadLimitVal};
8009   CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams),
8010                       PushNumTeamsArgs);
8011 }
8012 
8013 void CGOpenMPRuntime::emitTargetDataCalls(
8014     CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
8015     const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
8016   if (!CGF.HaveInsertPoint())
8017     return;
8018 
8019   // Action used to replace the default codegen action and turn privatization
8020   // off.
8021   PrePostActionTy NoPrivAction;
8022 
8023   // Generate the code for the opening of the data environment. Capture all the
8024   // arguments of the runtime call by reference because they are used in the
8025   // closing of the region.
8026   auto &&BeginThenGen = [this, &D, Device, &Info,
8027                          &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
8028     // Fill up the arrays with all the mapped variables.
8029     MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
8030     MappableExprsHandler::MapValuesArrayTy Pointers;
8031     MappableExprsHandler::MapValuesArrayTy Sizes;
8032     MappableExprsHandler::MapFlagsArrayTy MapTypes;
8033 
8034     // Get map clause information.
8035     MappableExprsHandler MCHandler(D, CGF);
8036     MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
8037 
8038     // Fill up the arrays and create the arguments.
8039     emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
8040 
8041     llvm::Value *BasePointersArrayArg = nullptr;
8042     llvm::Value *PointersArrayArg = nullptr;
8043     llvm::Value *SizesArrayArg = nullptr;
8044     llvm::Value *MapTypesArrayArg = nullptr;
8045     emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
8046                                  SizesArrayArg, MapTypesArrayArg, Info);
8047 
8048     // Emit device ID if any.
8049     llvm::Value *DeviceID = nullptr;
8050     if (Device) {
8051       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
8052                                            CGF.Int64Ty, /*isSigned=*/true);
8053     } else {
8054       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
8055     }
8056 
8057     // Emit the number of elements in the offloading arrays.
8058     llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
8059 
8060     llvm::Value *OffloadingArgs[] = {
8061         DeviceID,         PointerNum,    BasePointersArrayArg,
8062         PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
8063     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin),
8064                         OffloadingArgs);
8065 
8066     // If device pointer privatization is required, emit the body of the region
8067     // here. It will have to be duplicated: with and without privatization.
8068     if (!Info.CaptureDeviceAddrMap.empty())
8069       CodeGen(CGF);
8070   };
8071 
8072   // Generate code for the closing of the data region.
8073   auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF,
8074                                             PrePostActionTy &) {
8075     assert(Info.isValid() && "Invalid data environment closing arguments.");
8076 
8077     llvm::Value *BasePointersArrayArg = nullptr;
8078     llvm::Value *PointersArrayArg = nullptr;
8079     llvm::Value *SizesArrayArg = nullptr;
8080     llvm::Value *MapTypesArrayArg = nullptr;
8081     emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
8082                                  SizesArrayArg, MapTypesArrayArg, Info);
8083 
8084     // Emit device ID if any.
8085     llvm::Value *DeviceID = nullptr;
8086     if (Device) {
8087       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
8088                                            CGF.Int64Ty, /*isSigned=*/true);
8089     } else {
8090       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
8091     }
8092 
8093     // Emit the number of elements in the offloading arrays.
8094     llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
8095 
8096     llvm::Value *OffloadingArgs[] = {
8097         DeviceID,         PointerNum,    BasePointersArrayArg,
8098         PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
8099     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end),
8100                         OffloadingArgs);
8101   };
8102 
8103   // If we need device pointer privatization, we need to emit the body of the
8104   // region with no privatization in the 'else' branch of the conditional.
8105   // Otherwise, we don't have to do anything.
8106   auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
8107                                                          PrePostActionTy &) {
8108     if (!Info.CaptureDeviceAddrMap.empty()) {
8109       CodeGen.setAction(NoPrivAction);
8110       CodeGen(CGF);
8111     }
8112   };
8113 
8114   // We don't have to do anything to close the region if the if clause evaluates
8115   // to false.
8116   auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
8117 
8118   if (IfCond) {
8119     emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
8120   } else {
8121     RegionCodeGenTy RCG(BeginThenGen);
8122     RCG(CGF);
8123   }
8124 
8125   // If we don't require privatization of device pointers, we emit the body in
8126   // between the runtime calls. This avoids duplicating the body code.
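  // The resulting structure is roughly (sketch):
  //   __tgt_target_data_begin(...);
  //   <body of the target data region>
  //   __tgt_target_data_end(...);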
8127   if (Info.CaptureDeviceAddrMap.empty()) {
8128     CodeGen.setAction(NoPrivAction);
8129     CodeGen(CGF);
8130   }
8131 
8132   if (IfCond) {
8133     emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
8134   } else {
8135     RegionCodeGenTy RCG(EndThenGen);
8136     RCG(CGF);
8137   }
8138 }
8139 
8140 void CGOpenMPRuntime::emitTargetDataStandAloneCall(
8141     CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
8142     const Expr *Device) {
8143   if (!CGF.HaveInsertPoint())
8144     return;
8145 
8146   assert((isa<OMPTargetEnterDataDirective>(D) ||
8147           isa<OMPTargetExitDataDirective>(D) ||
8148           isa<OMPTargetUpdateDirective>(D)) &&
8149          "Expecting either target enter, exit data, or update directives.");
8150 
8151   CodeGenFunction::OMPTargetDataInfo InputInfo;
8152   llvm::Value *MapTypesArray = nullptr;
8153   // Generate the code for the opening of the data environment.
8154   auto &&ThenGen = [this, &D, Device, &InputInfo,
8155                     &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) {
8156     // Emit device ID if any.
8157     llvm::Value *DeviceID = nullptr;
8158     if (Device) {
8159       DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
8160                                            CGF.Int64Ty, /*isSigned=*/true);
8161     } else {
8162       DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
8163     }
8164 
8165     // Emit the number of elements in the offloading arrays.
8166     llvm::Constant *PointerNum =
8167         CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
8168 
8169     llvm::Value *OffloadingArgs[] = {DeviceID,
8170                                      PointerNum,
8171                                      InputInfo.BasePointersArray.getPointer(),
8172                                      InputInfo.PointersArray.getPointer(),
8173                                      InputInfo.SizesArray.getPointer(),
8174                                      MapTypesArray};
8175 
8176     // Select the right runtime function call for each expected standalone
8177     // directive.
8178     const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
8179     OpenMPRTLFunction RTLFn;
8180     switch (D.getDirectiveKind()) {
8181     case OMPD_target_enter_data:
8182       RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait
8183                         : OMPRTL__tgt_target_data_begin;
8184       break;
8185     case OMPD_target_exit_data:
8186       RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait
8187                         : OMPRTL__tgt_target_data_end;
8188       break;
8189     case OMPD_target_update:
8190       RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait
8191                         : OMPRTL__tgt_target_data_update;
8192       break;
8193     case OMPD_parallel:
8194     case OMPD_for:
8195     case OMPD_parallel_for:
8196     case OMPD_parallel_sections:
8197     case OMPD_for_simd:
8198     case OMPD_parallel_for_simd:
8199     case OMPD_cancel:
8200     case OMPD_cancellation_point:
8201     case OMPD_ordered:
8202     case OMPD_threadprivate:
8203     case OMPD_task:
8204     case OMPD_simd:
8205     case OMPD_sections:
8206     case OMPD_section:
8207     case OMPD_single:
8208     case OMPD_master:
8209     case OMPD_critical:
8210     case OMPD_taskyield:
8211     case OMPD_barrier:
8212     case OMPD_taskwait:
8213     case OMPD_taskgroup:
8214     case OMPD_atomic:
8215     case OMPD_flush:
8216     case OMPD_teams:
8217     case OMPD_target_data:
8218     case OMPD_distribute:
8219     case OMPD_distribute_simd:
8220     case OMPD_distribute_parallel_for:
8221     case OMPD_distribute_parallel_for_simd:
8222     case OMPD_teams_distribute:
8223     case OMPD_teams_distribute_simd:
8224     case OMPD_teams_distribute_parallel_for:
8225     case OMPD_teams_distribute_parallel_for_simd:
8226     case OMPD_declare_simd:
8227     case OMPD_declare_target:
8228     case OMPD_end_declare_target:
8229     case OMPD_declare_reduction:
8230     case OMPD_taskloop:
8231     case OMPD_taskloop_simd:
8232     case OMPD_target:
8233     case OMPD_target_simd:
8234     case OMPD_target_teams_distribute:
8235     case OMPD_target_teams_distribute_simd:
8236     case OMPD_target_teams_distribute_parallel_for:
8237     case OMPD_target_teams_distribute_parallel_for_simd:
8238     case OMPD_target_teams:
8239     case OMPD_target_parallel:
8240     case OMPD_target_parallel_for:
8241     case OMPD_target_parallel_for_simd:
8242     case OMPD_unknown:
8243       llvm_unreachable("Unexpected standalone target data directive.");
8244       break;
8245     }
8246     CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs);
8247   };
8248 
8249   auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray](
8250                              CodeGenFunction &CGF, PrePostActionTy &) {
8251     // Fill up the arrays with all the mapped variables.
8252     MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
8253     MappableExprsHandler::MapValuesArrayTy Pointers;
8254     MappableExprsHandler::MapValuesArrayTy Sizes;
8255     MappableExprsHandler::MapFlagsArrayTy MapTypes;
8256 
8257     // Get map clause information.
8258     MappableExprsHandler MEHandler(D, CGF);
8259     MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
8260 
8261     TargetDataInfo Info;
8262     // Fill up the arrays and create the arguments.
8263     emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
8264     emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
8265                                  Info.PointersArray, Info.SizesArray,
8266                                  Info.MapTypesArray, Info);
8267     InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
8268     InputInfo.BasePointersArray =
8269         Address(Info.BasePointersArray, CGM.getPointerAlign());
8270     InputInfo.PointersArray =
8271         Address(Info.PointersArray, CGM.getPointerAlign());
8272     InputInfo.SizesArray =
8273         Address(Info.SizesArray, CGM.getPointerAlign());
8274     MapTypesArray = Info.MapTypesArray;
8275     if (D.hasClausesOfKind<OMPDependClause>())
8276       CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
8277     else
8278       emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
8279   };
8280 
8281   if (IfCond) {
8282     emitOMPIfClause(CGF, IfCond, TargetThenGen,
8283                     [](CodeGenFunction &CGF, PrePostActionTy &) {});
8284   } else {
8285     RegionCodeGenTy ThenRCG(TargetThenGen);
8286     ThenRCG(CGF);
8287   }
8288 }
8289 
8290 namespace {
8291   /// Kind of parameter in a function with 'declare simd' directive.
8292   enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
8293   /// Attribute set of the parameter.
8294   struct ParamAttrTy {
8295     ParamKindTy Kind = Vector;
8296     llvm::APSInt StrideOrArg;
8297     llvm::APSInt Alignment;
8298   };
8299 } // namespace
8300 
8301 static unsigned evaluateCDTSize(const FunctionDecl *FD,
8302                                 ArrayRef<ParamAttrTy> ParamAttrs) {
  // Every vector variant of a SIMD-enabled function has a vector length
  // (VLEN). If the OpenMP clause "simdlen" is used, the VLEN is the value of
  // the argument of that clause. The VLEN value must be a power of 2.
  // Otherwise the notion of the function's "characteristic data type" (CDT)
  // is used to compute the vector length.
  // The CDT is defined in the following order:
  //   a) For a non-void function, the CDT is the return type.
  //   b) If the function has any non-uniform, non-linear parameters, the CDT
  //   is the type of the first such parameter.
  //   c) If the CDT determined by a) or b) above is a struct, union, or class
  //   type that is passed by value (except for a type that maps to a built-in
  //   complex data type), the characteristic data type is int.
  //   d) If none of the above three cases applies, the CDT is int.
  // The VLEN is then determined from the CDT and the size of the vector
  // register of the ISA for which the current vector version is generated,
  // using the formula below:
  //   VLEN = sizeof(vector_register) / sizeof(CDT),
  // where the vector register size is specified in section 3.2.1 "Registers
  // and the Stack Frame" of the original AMD64 ABI document.
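  // For example, for a 256-bit (AVX2) vector register and a CDT of double
  // (64 bits wide), the resulting VLEN would be 256 / 64 = 4.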
8322   QualType RetType = FD->getReturnType();
8323   if (RetType.isNull())
8324     return 0;
8325   ASTContext &C = FD->getASTContext();
8326   QualType CDT;
8327   if (!RetType.isNull() && !RetType->isVoidType()) {
8328     CDT = RetType;
8329   } else {
8330     unsigned Offset = 0;
8331     if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
8332       if (ParamAttrs[Offset].Kind == Vector)
8333         CDT = C.getPointerType(C.getRecordType(MD->getParent()));
8334       ++Offset;
8335     }
8336     if (CDT.isNull()) {
8337       for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) {
8338         if (ParamAttrs[I + Offset].Kind == Vector) {
8339           CDT = FD->getParamDecl(I)->getType();
8340           break;
8341         }
8342       }
8343     }
8344   }
8345   if (CDT.isNull())
8346     CDT = C.IntTy;
8347   CDT = CDT->getCanonicalTypeUnqualified();
8348   if (CDT->isRecordType() || CDT->isUnionType())
8349     CDT = C.IntTy;
8350   return C.getTypeSize(CDT);
8351 }
8352 
8353 static void
8354 emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
8355                            const llvm::APSInt &VLENVal,
8356                            ArrayRef<ParamAttrTy> ParamAttrs,
8357                            OMPDeclareSimdDeclAttr::BranchStateTy State) {
8358   struct ISADataTy {
8359     char ISA;
8360     unsigned VecRegSize;
8361   };
  ISADataTy ISAData[] = {
      {'b', 128}, // SSE
      {'c', 256}, // AVX
      {'d', 256}, // AVX2
      {'e', 512}, // AVX512
  };
8376   llvm::SmallVector<char, 2> Masked;
8377   switch (State) {
8378   case OMPDeclareSimdDeclAttr::BS_Undefined:
8379     Masked.push_back('N');
8380     Masked.push_back('M');
8381     break;
8382   case OMPDeclareSimdDeclAttr::BS_Notinbranch:
8383     Masked.push_back('N');
8384     break;
8385   case OMPDeclareSimdDeclAttr::BS_Inbranch:
8386     Masked.push_back('M');
8387     break;
8388   }
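  // For each requested mask/ISA combination, attach a vector-variant function
  // attribute whose name follows the "_ZGV<isa><mask><vlen><parameters>_<name>"
  // mangling used for x86 vector variants. As an illustration (assuming a
  // function named "foo"), "_ZGVbN4vv_foo" would denote an SSE, not-in-branch
  // variant with VLEN 4 and two vector parameters.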
8389   for (char Mask : Masked) {
8390     for (const ISADataTy &Data : ISAData) {
8391       SmallString<256> Buffer;
8392       llvm::raw_svector_ostream Out(Buffer);
8393       Out << "_ZGV" << Data.ISA << Mask;
8394       if (!VLENVal) {
8395         Out << llvm::APSInt::getUnsigned(Data.VecRegSize /
8396                                          evaluateCDTSize(FD, ParamAttrs));
8397       } else {
8398         Out << VLENVal;
8399       }
8400       for (const ParamAttrTy &ParamAttr : ParamAttrs) {
        switch (ParamAttr.Kind) {
8402         case LinearWithVarStride:
8403           Out << 's' << ParamAttr.StrideOrArg;
8404           break;
8405         case Linear:
8406           Out << 'l';
8407           if (!!ParamAttr.StrideOrArg)
8408             Out << ParamAttr.StrideOrArg;
8409           break;
8410         case Uniform:
8411           Out << 'u';
8412           break;
8413         case Vector:
8414           Out << 'v';
8415           break;
8416         }
8417         if (!!ParamAttr.Alignment)
8418           Out << 'a' << ParamAttr.Alignment;
8419       }
8420       Out << '_' << Fn->getName();
8421       Fn->addFnAttr(Out.str());
8422     }
8423   }
8424 }
8425 
8426 void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
8427                                               llvm::Function *Fn) {
8428   ASTContext &C = CGM.getContext();
8429   FD = FD->getMostRecentDecl();
8430   // Map params to their positions in function decl.
8431   llvm::DenseMap<const Decl *, unsigned> ParamPositions;
8432   if (isa<CXXMethodDecl>(FD))
8433     ParamPositions.try_emplace(FD, 0);
8434   unsigned ParamPos = ParamPositions.size();
8435   for (const ParmVarDecl *P : FD->parameters()) {
8436     ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
8437     ++ParamPos;
8438   }
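  // Walk the redeclaration chain: every 'declare simd' attribute found on any
  // redeclaration contributes its own set of vector variants for Fn.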
8439   while (FD) {
8440     for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
8441       llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
8442       // Mark uniform parameters.
8443       for (const Expr *E : Attr->uniforms()) {
8444         E = E->IgnoreParenImpCasts();
8445         unsigned Pos;
8446         if (isa<CXXThisExpr>(E)) {
8447           Pos = ParamPositions[FD];
8448         } else {
8449           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8450                                 ->getCanonicalDecl();
8451           Pos = ParamPositions[PVD];
8452         }
8453         ParamAttrs[Pos].Kind = Uniform;
8454       }
8455       // Get alignment info.
8456       auto NI = Attr->alignments_begin();
8457       for (const Expr *E : Attr->aligneds()) {
8458         E = E->IgnoreParenImpCasts();
8459         unsigned Pos;
8460         QualType ParmTy;
8461         if (isa<CXXThisExpr>(E)) {
8462           Pos = ParamPositions[FD];
8463           ParmTy = E->getType();
8464         } else {
8465           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8466                                 ->getCanonicalDecl();
8467           Pos = ParamPositions[PVD];
8468           ParmTy = PVD->getType();
8469         }
8470         ParamAttrs[Pos].Alignment =
8471             (*NI)
8472                 ? (*NI)->EvaluateKnownConstInt(C)
8473                 : llvm::APSInt::getUnsigned(
8474                       C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy))
8475                           .getQuantity());
8476         ++NI;
8477       }
8478       // Mark linear parameters.
8479       auto SI = Attr->steps_begin();
8480       auto MI = Attr->modifiers_begin();
8481       for (const Expr *E : Attr->linears()) {
8482         E = E->IgnoreParenImpCasts();
8483         unsigned Pos;
8484         if (isa<CXXThisExpr>(E)) {
8485           Pos = ParamPositions[FD];
8486         } else {
8487           const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
8488                                 ->getCanonicalDecl();
8489           Pos = ParamPositions[PVD];
8490         }
8491         ParamAttrTy &ParamAttr = ParamAttrs[Pos];
8492         ParamAttr.Kind = Linear;
8493         if (*SI) {
8494           if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
8495                                     Expr::SE_AllowSideEffects)) {
            if (const auto *DRE =
                    dyn_cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
              if (const auto *StridePVD =
                      dyn_cast<ParmVarDecl>(DRE->getDecl())) {
8499                 ParamAttr.Kind = LinearWithVarStride;
8500                 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
8501                     ParamPositions[StridePVD->getCanonicalDecl()]);
8502               }
8503             }
8504           }
8505         }
8506         ++SI;
8507         ++MI;
8508       }
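      // A VLENVal of zero (no 'simdlen' clause) lets the target-specific
      // emitter derive the vector length from the characteristic data type.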
8509       llvm::APSInt VLENVal;
8510       if (const Expr *VLEN = Attr->getSimdlen())
8511         VLENVal = VLEN->EvaluateKnownConstInt(C);
8512       OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState();
8513       if (CGM.getTriple().getArch() == llvm::Triple::x86 ||
8514           CGM.getTriple().getArch() == llvm::Triple::x86_64)
8515         emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State);
8516     }
8517     FD = FD->getPreviousDecl();
8518   }
8519 }
8520 
8521 namespace {
8522 /// Cleanup action for doacross support.
8523 class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
8524 public:
8525   static const int DoacrossFinArgs = 2;
8526 
8527 private:
8528   llvm::Value *RTLFn;
8529   llvm::Value *Args[DoacrossFinArgs];
8530 
8531 public:
8532   DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
8533       : RTLFn(RTLFn) {
8534     assert(CallArgs.size() == DoacrossFinArgs);
8535     std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
8536   }
8537   void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
8538     if (!CGF.HaveInsertPoint())
8539       return;
8540     CGF.EmitRuntimeCall(RTLFn, Args);
8541   }
8542 };
8543 } // namespace
8544 
8545 void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
8546                                        const OMPLoopDirective &D) {
8547   if (!CGF.HaveInsertPoint())
8548     return;
8549 
8550   ASTContext &C = CGM.getContext();
8551   QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
8552   RecordDecl *RD;
8553   if (KmpDimTy.isNull()) {
    // Build struct kmp_dim {  // loop bounds info cast to kmp_int64
8555     //  kmp_int64 lo; // lower
8556     //  kmp_int64 up; // upper
8557     //  kmp_int64 st; // stride
8558     // };
8559     RD = C.buildImplicitRecord("kmp_dim");
8560     RD->startDefinition();
8561     addFieldToRecordDecl(C, RD, Int64Ty);
8562     addFieldToRecordDecl(C, RD, Int64Ty);
8563     addFieldToRecordDecl(C, RD, Int64Ty);
8564     RD->completeDefinition();
8565     KmpDimTy = C.getRecordType(RD);
8566   } else {
8567     RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
8568   }
8569 
8570   Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
8571   CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
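  // Only the upper bound and the stride are filled in below; the lower bound
  // keeps the zero value from the null initialization above.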
8572   enum { LowerFD = 0, UpperFD, StrideFD };
8573   // Fill dims with data.
8574   LValue DimsLVal = CGF.MakeAddrLValue(DimsAddr, KmpDimTy);
8575   // dims.upper = num_iterations;
8576   LValue UpperLVal =
8577       CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), UpperFD));
8578   llvm::Value *NumIterVal = CGF.EmitScalarConversion(
8579       CGF.EmitScalarExpr(D.getNumIterations()), D.getNumIterations()->getType(),
8580       Int64Ty, D.getNumIterations()->getExprLoc());
8581   CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
8582   // dims.stride = 1;
8583   LValue StrideLVal =
8584       CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), StrideFD));
8585   CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
8586                         StrideLVal);
8587 
8588   // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
8589   // kmp_int32 num_dims, struct kmp_dim * dims);
8590   llvm::Value *Args[] = {emitUpdateLocation(CGF, D.getLocStart()),
8591                          getThreadID(CGF, D.getLocStart()),
8592                          llvm::ConstantInt::getSigned(CGM.Int32Ty, 1),
8593                          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
8594                              DimsAddr.getPointer(), CGM.VoidPtrTy)};
8595 
8596   llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
8597   CGF.EmitRuntimeCall(RTLFn, Args);
8598   llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
8599       emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
8600   llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
8601   CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
8602                                              llvm::makeArrayRef(FiniArgs));
8603 }
8604 
8605 void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
8606                                           const OMPDependClause *C) {
8607   QualType Int64Ty =
8608       CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
8609   const Expr *CounterVal = C->getCounterValue();
8610   assert(CounterVal);
8611   llvm::Value *CntVal = CGF.EmitScalarConversion(CGF.EmitScalarExpr(CounterVal),
8612                                                  CounterVal->getType(), Int64Ty,
8613                                                  CounterVal->getExprLoc());
8614   Address CntAddr = CGF.CreateMemTemp(Int64Ty, ".cnt.addr");
8615   CGF.EmitStoreOfScalar(CntVal, CntAddr, /*Volatile=*/false, Int64Ty);
8616   llvm::Value *Args[] = {emitUpdateLocation(CGF, C->getLocStart()),
8617                          getThreadID(CGF, C->getLocStart()),
8618                          CntAddr.getPointer()};
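  // A 'source' dependency posts completion of the current iteration via
  // __kmpc_doacross_post; a 'sink' dependency waits on the specified
  // iteration via __kmpc_doacross_wait.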
8619   llvm::Value *RTLFn;
8620   if (C->getDependencyKind() == OMPC_DEPEND_source) {
8621     RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
8622   } else {
8623     assert(C->getDependencyKind() == OMPC_DEPEND_sink);
8624     RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
8625   }
8626   CGF.EmitRuntimeCall(RTLFn, Args);
8627 }
8628 
8629 void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
8630                                llvm::Value *Callee,
8631                                ArrayRef<llvm::Value *> Args) const {
8632   assert(Loc.isValid() && "Outlined function call location must be valid.");
8633   auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
8634 
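  // When the callee is known not to throw, mark the call site nounwind.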
8635   if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
8636     if (Fn->doesNotThrow()) {
8637       CGF.EmitNounwindRuntimeCall(Fn, Args);
8638       return;
8639     }
8640   }
8641   CGF.EmitRuntimeCall(Callee, Args);
8642 }
8643 
8644 void CGOpenMPRuntime::emitOutlinedFunctionCall(
8645     CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
8646     ArrayRef<llvm::Value *> Args) const {
8647   emitCall(CGF, Loc, OutlinedFn, Args);
8648 }
8649 
8650 Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
8651                                              const VarDecl *NativeParam,
8652                                              const VarDecl *TargetParam) const {
8653   return CGF.GetAddrOfLocalVar(NativeParam);
8654 }
8655 
8656 Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
8657                                                    const VarDecl *VD) {
8658   return Address::invalid();
8659 }
8660 
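// CGOpenMPSIMDRuntime performs code generation for the SIMD-only OpenMP mode
// (e.g. under -fopenmp-simd): only simd-related constructs are supported, so
// entry points that would require the OpenMP runtime library are unreachable
// here.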
8661 llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
8662     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
8663     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
8664   llvm_unreachable("Not supported in SIMD-only mode");
8665 }
8666 
8667 llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
8668     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
8669     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
8670   llvm_unreachable("Not supported in SIMD-only mode");
8671 }
8672 
8673 llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
8674     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
8675     const VarDecl *PartIDVar, const VarDecl *TaskTVar,
8676     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
8677     bool Tied, unsigned &NumberOfParts) {
8678   llvm_unreachable("Not supported in SIMD-only mode");
8679 }
8680 
8681 void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
8682                                            SourceLocation Loc,
8683                                            llvm::Value *OutlinedFn,
8684                                            ArrayRef<llvm::Value *> CapturedVars,
8685                                            const Expr *IfCond) {
8686   llvm_unreachable("Not supported in SIMD-only mode");
8687 }
8688 
8689 void CGOpenMPSIMDRuntime::emitCriticalRegion(
8690     CodeGenFunction &CGF, StringRef CriticalName,
8691     const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
8692     const Expr *Hint) {
8693   llvm_unreachable("Not supported in SIMD-only mode");
8694 }
8695 
8696 void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
8697                                            const RegionCodeGenTy &MasterOpGen,
8698                                            SourceLocation Loc) {
8699   llvm_unreachable("Not supported in SIMD-only mode");
8700 }
8701 
8702 void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
8703                                             SourceLocation Loc) {
8704   llvm_unreachable("Not supported in SIMD-only mode");
8705 }
8706 
8707 void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
8708     CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
8709     SourceLocation Loc) {
8710   llvm_unreachable("Not supported in SIMD-only mode");
8711 }
8712 
8713 void CGOpenMPSIMDRuntime::emitSingleRegion(
8714     CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
8715     SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
8716     ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
8717     ArrayRef<const Expr *> AssignmentOps) {
8718   llvm_unreachable("Not supported in SIMD-only mode");
8719 }
8720 
8721 void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
8722                                             const RegionCodeGenTy &OrderedOpGen,
8723                                             SourceLocation Loc,
8724                                             bool IsThreads) {
8725   llvm_unreachable("Not supported in SIMD-only mode");
8726 }
8727 
8728 void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
8729                                           SourceLocation Loc,
8730                                           OpenMPDirectiveKind Kind,
8731                                           bool EmitChecks,
8732                                           bool ForceSimpleCall) {
8733   llvm_unreachable("Not supported in SIMD-only mode");
8734 }
8735 
8736 void CGOpenMPSIMDRuntime::emitForDispatchInit(
8737     CodeGenFunction &CGF, SourceLocation Loc,
8738     const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
8739     bool Ordered, const DispatchRTInput &DispatchValues) {
8740   llvm_unreachable("Not supported in SIMD-only mode");
8741 }
8742 
8743 void CGOpenMPSIMDRuntime::emitForStaticInit(
8744     CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
8745     const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
8746   llvm_unreachable("Not supported in SIMD-only mode");
8747 }
8748 
8749 void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
8750     CodeGenFunction &CGF, SourceLocation Loc,
8751     OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
8752   llvm_unreachable("Not supported in SIMD-only mode");
8753 }
8754 
8755 void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
8756                                                      SourceLocation Loc,
8757                                                      unsigned IVSize,
8758                                                      bool IVSigned) {
8759   llvm_unreachable("Not supported in SIMD-only mode");
8760 }
8761 
8762 void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
8763                                               SourceLocation Loc,
8764                                               OpenMPDirectiveKind DKind) {
8765   llvm_unreachable("Not supported in SIMD-only mode");
8766 }
8767 
8768 llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
8769                                               SourceLocation Loc,
8770                                               unsigned IVSize, bool IVSigned,
8771                                               Address IL, Address LB,
8772                                               Address UB, Address ST) {
8773   llvm_unreachable("Not supported in SIMD-only mode");
8774 }
8775 
8776 void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
8777                                                llvm::Value *NumThreads,
8778                                                SourceLocation Loc) {
8779   llvm_unreachable("Not supported in SIMD-only mode");
8780 }
8781 
8782 void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
8783                                              OpenMPProcBindClauseKind ProcBind,
8784                                              SourceLocation Loc) {
8785   llvm_unreachable("Not supported in SIMD-only mode");
8786 }
8787 
8788 Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
8789                                                     const VarDecl *VD,
8790                                                     Address VDAddr,
8791                                                     SourceLocation Loc) {
8792   llvm_unreachable("Not supported in SIMD-only mode");
8793 }
8794 
8795 llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
8796     const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
8797     CodeGenFunction *CGF) {
8798   llvm_unreachable("Not supported in SIMD-only mode");
8799 }
8800 
8801 Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
8802     CodeGenFunction &CGF, QualType VarType, StringRef Name) {
8803   llvm_unreachable("Not supported in SIMD-only mode");
8804 }
8805 
8806 void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
8807                                     ArrayRef<const Expr *> Vars,
8808                                     SourceLocation Loc) {
8809   llvm_unreachable("Not supported in SIMD-only mode");
8810 }
8811 
8812 void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
8813                                        const OMPExecutableDirective &D,
8814                                        llvm::Value *TaskFunction,
8815                                        QualType SharedsTy, Address Shareds,
8816                                        const Expr *IfCond,
8817                                        const OMPTaskDataTy &Data) {
8818   llvm_unreachable("Not supported in SIMD-only mode");
8819 }
8820 
8821 void CGOpenMPSIMDRuntime::emitTaskLoopCall(
8822     CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
8823     llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
8824     const Expr *IfCond, const OMPTaskDataTy &Data) {
8825   llvm_unreachable("Not supported in SIMD-only mode");
8826 }
8827 
8828 void CGOpenMPSIMDRuntime::emitReduction(
8829     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
8830     ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
8831     ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
8832   assert(Options.SimpleReduction && "Only simple reduction is expected.");
8833   CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
8834                                  ReductionOps, Options);
8835 }
8836 
8837 llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
8838     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
8839     ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
8840   llvm_unreachable("Not supported in SIMD-only mode");
8841 }
8842 
8843 void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
8844                                                   SourceLocation Loc,
8845                                                   ReductionCodeGen &RCG,
8846                                                   unsigned N) {
8847   llvm_unreachable("Not supported in SIMD-only mode");
8848 }
8849 
8850 Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
8851                                                   SourceLocation Loc,
8852                                                   llvm::Value *ReductionsPtr,
8853                                                   LValue SharedLVal) {
8854   llvm_unreachable("Not supported in SIMD-only mode");
8855 }
8856 
8857 void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
8858                                            SourceLocation Loc) {
8859   llvm_unreachable("Not supported in SIMD-only mode");
8860 }
8861 
8862 void CGOpenMPSIMDRuntime::emitCancellationPointCall(
8863     CodeGenFunction &CGF, SourceLocation Loc,
8864     OpenMPDirectiveKind CancelRegion) {
8865   llvm_unreachable("Not supported in SIMD-only mode");
8866 }
8867 
8868 void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
8869                                          SourceLocation Loc, const Expr *IfCond,
8870                                          OpenMPDirectiveKind CancelRegion) {
8871   llvm_unreachable("Not supported in SIMD-only mode");
8872 }
8873 
8874 void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
8875     const OMPExecutableDirective &D, StringRef ParentName,
8876     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
8877     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
8878   llvm_unreachable("Not supported in SIMD-only mode");
8879 }
8880 
8881 void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
8882                                          const OMPExecutableDirective &D,
8883                                          llvm::Value *OutlinedFn,
8884                                          llvm::Value *OutlinedFnID,
8885                                          const Expr *IfCond, const Expr *Device) {
8886   llvm_unreachable("Not supported in SIMD-only mode");
8887 }
8888 
8889 bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
8890   llvm_unreachable("Not supported in SIMD-only mode");
8891 }
8892 
8893 bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
8894   llvm_unreachable("Not supported in SIMD-only mode");
8895 }
8896 
8897 bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
8898   return false;
8899 }
8900 
8901 llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
8902   return nullptr;
8903 }
8904 
8905 void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
8906                                         const OMPExecutableDirective &D,
8907                                         SourceLocation Loc,
8908                                         llvm::Value *OutlinedFn,
8909                                         ArrayRef<llvm::Value *> CapturedVars) {
8910   llvm_unreachable("Not supported in SIMD-only mode");
8911 }
8912 
8913 void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
8914                                              const Expr *NumTeams,
8915                                              const Expr *ThreadLimit,
8916                                              SourceLocation Loc) {
8917   llvm_unreachable("Not supported in SIMD-only mode");
8918 }
8919 
8920 void CGOpenMPSIMDRuntime::emitTargetDataCalls(
8921     CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
8922     const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
8923   llvm_unreachable("Not supported in SIMD-only mode");
8924 }
8925 
8926 void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
8927     CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
8928     const Expr *Device) {
8929   llvm_unreachable("Not supported in SIMD-only mode");
8930 }
8931 
8932 void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
8933                                            const OMPLoopDirective &D) {
8934   llvm_unreachable("Not supported in SIMD-only mode");
8935 }
8936 
8937 void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
8938                                               const OMPDependClause *C) {
8939   llvm_unreachable("Not supported in SIMD-only mode");
8940 }
8941 
8942 const VarDecl *
8943 CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
8944                                         const VarDecl *NativeParam) const {
8945   llvm_unreachable("Not supported in SIMD-only mode");
8946 }
8947 
8948 Address
8949 CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
8950                                          const VarDecl *NativeParam,
8951                                          const VarDecl *TargetParam) const {
8952   llvm_unreachable("Not supported in SIMD-only mode");
8953 }
8954 
8955