1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // OpenMP specific optimizations:
10 //
11 // - Deduplication of runtime calls, e.g., omp_get_thread_num.
12 // - Replacing globalized device memory with stack memory.
13 // - Replacing globalized device memory with shared memory.
14 // - Parallel region merging.
15 // - Transforming generic-mode device kernels to SPMD mode.
16 // - Specializing the state machine for generic-mode device kernels.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "llvm/Transforms/IPO/OpenMPOpt.h"
21 
22 #include "llvm/ADT/EnumeratedArray.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/CallGraph.h"
28 #include "llvm/Analysis/CallGraphSCCPass.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Frontend/OpenMP/OMPConstants.h"
33 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
34 #include "llvm/IR/Assumptions.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DiagnosticInfo.h"
37 #include "llvm/IR/GlobalValue.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/IntrinsicsAMDGPU.h"
43 #include "llvm/IR/IntrinsicsNVPTX.h"
44 #include "llvm/IR/LLVMContext.h"
45 #include "llvm/InitializePasses.h"
46 #include "llvm/Support/CommandLine.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Transforms/IPO.h"
49 #include "llvm/Transforms/IPO/Attributor.h"
50 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
51 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
52 #include "llvm/Transforms/Utils/CodeExtractor.h"
53 
54 #include <algorithm>
55 
56 using namespace llvm;
57 using namespace omp;
58 
59 #define DEBUG_TYPE "openmp-opt"
60 
61 static cl::opt<bool> DisableOpenMPOptimizations(
62     "openmp-opt-disable", cl::ZeroOrMore,
63     cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
64     cl::init(false));
65 
66 static cl::opt<bool> EnableParallelRegionMerging(
67     "openmp-opt-enable-merging", cl::ZeroOrMore,
68     cl::desc("Enable the OpenMP region merging optimization."), cl::Hidden,
69     cl::init(false));
70 
71 static cl::opt<bool>
72     DisableInternalization("openmp-opt-disable-internalization", cl::ZeroOrMore,
73                            cl::desc("Disable function internalization."),
74                            cl::Hidden, cl::init(false));
75 
76 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
77                                     cl::Hidden);
78 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
79                                         cl::init(false), cl::Hidden);
80 
81 static cl::opt<bool> HideMemoryTransferLatency(
82     "openmp-hide-memory-transfer-latency",
83     cl::desc("[WIP] Tries to hide the latency of host to device memory"
84              " transfers"),
85     cl::Hidden, cl::init(false));
86 
87 static cl::opt<bool> DisableOpenMPOptDeglobalization(
88     "openmp-opt-disable-deglobalization", cl::ZeroOrMore,
89     cl::desc("Disable OpenMP optimizations involving deglobalization."),
90     cl::Hidden, cl::init(false));
91 
92 static cl::opt<bool> DisableOpenMPOptSPMDization(
93     "openmp-opt-disable-spmdization", cl::ZeroOrMore,
94     cl::desc("Disable OpenMP optimizations involving SPMD-ization."),
95     cl::Hidden, cl::init(false));
96 
97 static cl::opt<bool> DisableOpenMPOptFolding(
98     "openmp-opt-disable-folding", cl::ZeroOrMore,
99     cl::desc("Disable OpenMP optimizations involving folding."), cl::Hidden,
100     cl::init(false));
101 
102 static cl::opt<bool> DisableOpenMPOptStateMachineRewrite(
103     "openmp-opt-disable-state-machine-rewrite", cl::ZeroOrMore,
104     cl::desc("Disable OpenMP optimizations that replace the state machine."),
105     cl::Hidden, cl::init(false));
106 
107 static cl::opt<bool> DisableOpenMPOptBarrierElimination(
108     "openmp-opt-disable-barrier-elimination", cl::ZeroOrMore,
109     cl::desc("Disable OpenMP optimizations that eliminate barriers."),
110     cl::Hidden, cl::init(false));
111 
112 static cl::opt<bool> PrintModuleAfterOptimizations(
113     "openmp-opt-print-module-after", cl::ZeroOrMore,
114     cl::desc("Print the current module after OpenMP optimizations."),
115     cl::Hidden, cl::init(false));
116 
117 static cl::opt<bool> PrintModuleBeforeOptimizations(
118     "openmp-opt-print-module-before", cl::ZeroOrMore,
119     cl::desc("Print the current module before OpenMP optimizations."),
120     cl::Hidden, cl::init(false));
121 
122 static cl::opt<bool> AlwaysInlineDeviceFunctions(
123     "openmp-opt-inline-device", cl::ZeroOrMore,
124     cl::desc("Inline all applicible functions on the device."), cl::Hidden,
125     cl::init(false));
126 
127 static cl::opt<bool>
128     EnableVerboseRemarks("openmp-opt-verbose-remarks", cl::ZeroOrMore,
129                          cl::desc("Enables more verbose remarks."), cl::Hidden,
130                          cl::init(false));
131 
132 static cl::opt<unsigned>
133     SetFixpointIterations("openmp-opt-max-iterations", cl::Hidden,
134                           cl::desc("Maximal number of attributor iterations."),
135                           cl::init(256));
136 
137 static cl::opt<unsigned>
138     SharedMemoryLimit("openmp-opt-shared-limit", cl::Hidden,
139                       cl::desc("Maximum amount of shared memory to use."),
140                       cl::init(std::numeric_limits<unsigned>::max()));
141 
142 STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
143           "Number of OpenMP runtime calls deduplicated");
144 STATISTIC(NumOpenMPParallelRegionsDeleted,
145           "Number of OpenMP parallel regions deleted");
146 STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
147           "Number of OpenMP runtime functions identified");
148 STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
149           "Number of OpenMP runtime function uses identified");
150 STATISTIC(NumOpenMPTargetRegionKernels,
151           "Number of OpenMP target region entry points (=kernels) identified");
152 STATISTIC(NumOpenMPTargetRegionKernelsSPMD,
153           "Number of OpenMP target region entry points (=kernels) executed in "
154           "SPMD-mode instead of generic-mode");
155 STATISTIC(NumOpenMPTargetRegionKernelsWithoutStateMachine,
156           "Number of OpenMP target region entry points (=kernels) executed in "
157           "generic-mode without a state machines");
158 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback,
159           "Number of OpenMP target region entry points (=kernels) executed in "
160           "generic-mode with customized state machines with fallback");
161 STATISTIC(NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback,
162           "Number of OpenMP target region entry points (=kernels) executed in "
163           "generic-mode with customized state machines without fallback");
164 STATISTIC(
165     NumOpenMPParallelRegionsReplacedInGPUStateMachine,
166     "Number of OpenMP parallel regions replaced with ID in GPU state machines");
167 STATISTIC(NumOpenMPParallelRegionsMerged,
168           "Number of OpenMP parallel regions merged");
169 STATISTIC(NumBytesMovedToSharedMemory,
170           "Amount of memory pushed to shared memory");
171 STATISTIC(NumBarriersEliminated, "Number of redundant barriers eliminated");
172 
173 #if !defined(NDEBUG)
174 static constexpr auto TAG = "[" DEBUG_TYPE "]";
175 #endif
176 
177 namespace {
178 
179 struct AAHeapToShared;
180 
181 struct AAICVTracker;
182 
183 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for
184 /// Attributor runs.
185 struct OMPInformationCache : public InformationCache {
186   OMPInformationCache(Module &M, AnalysisGetter &AG,
187                       BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
188                       KernelSet &Kernels)
189       : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
190         Kernels(Kernels) {
191 
192     OMPBuilder.initialize();
193     initializeRuntimeFunctions();
194     initializeInternalControlVars();
195   }
196 
197   /// Generic information that describes an internal control variable.
198   struct InternalControlVarInfo {
199     /// The kind, as described by InternalControlVar enum.
200     InternalControlVar Kind;
201 
202     /// The name of the ICV.
203     StringRef Name;
204 
205     /// Environment variable associated with this ICV.
206     StringRef EnvVarName;
207 
208     /// Initial value kind.
209     ICVInitValue InitKind;
210 
211     /// Initial value.
212     ConstantInt *InitValue;
213 
214     /// Setter RTL function associated with this ICV.
215     RuntimeFunction Setter;
216 
217     /// Getter RTL function associated with this ICV.
218     RuntimeFunction Getter;
219 
220     /// RTL Function corresponding to the override clause of this ICV
221     RuntimeFunction Clause;
222   };
223 
224   /// Generic information that describes a runtime function
225   struct RuntimeFunctionInfo {
226 
227     /// The kind, as described by the RuntimeFunction enum.
228     RuntimeFunction Kind;
229 
230     /// The name of the function.
231     StringRef Name;
232 
233     /// Flag to indicate a variadic function.
234     bool IsVarArg;
235 
236     /// The return type of the function.
237     Type *ReturnType;
238 
239     /// The argument types of the function.
240     SmallVector<Type *, 8> ArgumentTypes;
241 
242     /// The declaration if available.
243     Function *Declaration = nullptr;
244 
245     /// Uses of this runtime function per function containing the use.
246     using UseVector = SmallVector<Use *, 16>;
247 
248     /// Clear UsesMap for runtime function.
249     void clearUsesMap() { UsesMap.clear(); }
250 
251     /// Boolean conversion that is true if the runtime function was found.
252     operator bool() const { return Declaration; }
253 
254     /// Return the vector of uses in function \p F.
255     UseVector &getOrCreateUseVector(Function *F) {
256       std::shared_ptr<UseVector> &UV = UsesMap[F];
257       if (!UV)
258         UV = std::make_shared<UseVector>();
259       return *UV;
260     }
261 
262     /// Return the vector of uses in function \p F or `nullptr` if there are
263     /// none.
264     const UseVector *getUseVector(Function &F) const {
265       auto I = UsesMap.find(&F);
266       if (I != UsesMap.end())
267         return I->second.get();
268       return nullptr;
269     }
270 
271     /// Return how many functions contain uses of this runtime function.
272     size_t getNumFunctionsWithUses() const { return UsesMap.size(); }
273 
274     /// Return the number of arguments (or the minimal number for variadic
275     /// functions).
276     size_t getNumArgs() const { return ArgumentTypes.size(); }
277 
278     /// Run the callback \p CB on each use and forget the use if the result is
279     /// true. The callback will be fed the function in which the use was
280     /// encountered as second argument.
281     void foreachUse(SmallVectorImpl<Function *> &SCC,
282                     function_ref<bool(Use &, Function &)> CB) {
283       for (Function *F : SCC)
284         foreachUse(CB, F);
285     }
286 
287     /// Run the callback \p CB on each use within the function \p F and forget
288     /// the use if the result is true.
289     void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
290       SmallVector<unsigned, 8> ToBeDeleted;
291       ToBeDeleted.clear();
292 
293       unsigned Idx = 0;
294       UseVector &UV = getOrCreateUseVector(F);
295 
296       for (Use *U : UV) {
297         if (CB(*U, *F))
298           ToBeDeleted.push_back(Idx);
299         ++Idx;
300       }
301 
302       // Remove the to-be-deleted indices in reverse order as prior
303       // modifications will not modify the smaller indices.
304       while (!ToBeDeleted.empty()) {
305         unsigned Idx = ToBeDeleted.pop_back_val();
306         UV[Idx] = UV.back();
307         UV.pop_back();
308       }
309     }
310 
311   private:
312     /// Map from functions to all uses of this runtime function contained in
313     /// them.
314     DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
315 
316   public:
317     /// Iterators for the uses of this runtime function.
318     decltype(UsesMap)::iterator begin() { return UsesMap.begin(); }
319     decltype(UsesMap)::iterator end() { return UsesMap.end(); }
320   };
321 
322   /// An OpenMP-IR-Builder instance
323   OpenMPIRBuilder OMPBuilder;
324 
325   /// Map from runtime function kind to the runtime function description.
326   EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
327                   RuntimeFunction::OMPRTL___last>
328       RFIs;
329 
330   /// Map from function declarations/definitions to their runtime enum type.
331   DenseMap<Function *, RuntimeFunction> RuntimeFunctionIDMap;
332 
333   /// Map from ICV kind to the ICV description.
334   EnumeratedArray<InternalControlVarInfo, InternalControlVar,
335                   InternalControlVar::ICV___last>
336       ICVs;
337 
338   /// Helper to initialize all internal control variable information for those
339   /// defined in OMPKinds.def.
340   void initializeInternalControlVars() {
341 #define ICV_RT_SET(_Name, RTL)                                                 \
342   {                                                                            \
343     auto &ICV = ICVs[_Name];                                                   \
344     ICV.Setter = RTL;                                                          \
345   }
346 #define ICV_RT_GET(Name, RTL)                                                  \
347   {                                                                            \
348     auto &ICV = ICVs[Name];                                                    \
349     ICV.Getter = RTL;                                                          \
350   }
351 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
352   {                                                                            \
353     auto &ICV = ICVs[Enum];                                                    \
354     ICV.Name = _Name;                                                          \
355     ICV.Kind = Enum;                                                           \
356     ICV.InitKind = Init;                                                       \
357     ICV.EnvVarName = _EnvVarName;                                              \
358     switch (ICV.InitKind) {                                                    \
359     case ICV_IMPLEMENTATION_DEFINED:                                           \
360       ICV.InitValue = nullptr;                                                 \
361       break;                                                                   \
362     case ICV_ZERO:                                                             \
363       ICV.InitValue = ConstantInt::get(                                        \
364           Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
365       break;                                                                   \
366     case ICV_FALSE:                                                            \
367       ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
368       break;                                                                   \
369     case ICV_LAST:                                                             \
370       break;                                                                   \
371     }                                                                          \
372   }
373 #include "llvm/Frontend/OpenMP/OMPKinds.def"
374   }
375 
376   /// Returns true if the function declaration \p F matches the runtime
377   /// function types, that is, return type \p RTFRetType, and argument types
378   /// \p RTFArgTypes.
379   static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
380                                   SmallVector<Type *, 8> &RTFArgTypes) {
381     // TODO: We should output information to the user (under debug output
382     //       and via remarks).
383 
384     if (!F)
385       return false;
386     if (F->getReturnType() != RTFRetType)
387       return false;
388     if (F->arg_size() != RTFArgTypes.size())
389       return false;
390 
391     auto *RTFTyIt = RTFArgTypes.begin();
392     for (Argument &Arg : F->args()) {
393       if (Arg.getType() != *RTFTyIt)
394         return false;
395 
396       ++RTFTyIt;
397     }
398 
399     return true;
400   }
401 
402   // Helper to collect all uses of the declaration in the UsesMap.
403   unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
404     unsigned NumUses = 0;
405     if (!RFI.Declaration)
406       return NumUses;
407     OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);
408 
409     if (CollectStats) {
410       NumOpenMPRuntimeFunctionsIdentified += 1;
411       NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
412     }
413 
414     // TODO: We directly convert uses into proper calls and unknown uses.
415     for (Use &U : RFI.Declaration->uses()) {
416       if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
417         if (ModuleSlice.count(UserI->getFunction())) {
418           RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
419           ++NumUses;
420         }
421       } else {
422         RFI.getOrCreateUseVector(nullptr).push_back(&U);
423         ++NumUses;
424       }
425     }
426     return NumUses;
427   }
428 
429   // Helper function to recollect uses of a runtime function.
430   void recollectUsesForFunction(RuntimeFunction RTF) {
431     auto &RFI = RFIs[RTF];
432     RFI.clearUsesMap();
433     collectUses(RFI, /*CollectStats*/ false);
434   }
435 
436   // Helper function to recollect uses of all runtime functions.
437   void recollectUses() {
438     for (int Idx = 0; Idx < RFIs.size(); ++Idx)
439       recollectUsesForFunction(static_cast<RuntimeFunction>(Idx));
440   }
441 
442   // Helper function to inherit the calling convention of the function callee.
443   void setCallingConvention(FunctionCallee Callee, CallInst *CI) {
444     if (Function *Fn = dyn_cast<Function>(Callee.getCallee()))
445       CI->setCallingConv(Fn->getCallingConv());
446   }
447 
448   /// Helper to initialize all runtime function information for those defined
449   /// in OpenMPKinds.def.
450   void initializeRuntimeFunctions() {
451     Module &M = *((*ModuleSlice.begin())->getParent());
452 
453     // Helper macros for handling __VA_ARGS__ in OMP_RTL
454 #define OMP_TYPE(VarName, ...)                                                 \
455   Type *VarName = OMPBuilder.VarName;                                          \
456   (void)VarName;
457 
458 #define OMP_ARRAY_TYPE(VarName, ...)                                           \
459   ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
460   (void)VarName##Ty;                                                           \
461   PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
462   (void)VarName##PtrTy;
463 
464 #define OMP_FUNCTION_TYPE(VarName, ...)                                        \
465   FunctionType *VarName = OMPBuilder.VarName;                                  \
466   (void)VarName;                                                               \
467   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
468   (void)VarName##Ptr;
469 
470 #define OMP_STRUCT_TYPE(VarName, ...)                                          \
471   StructType *VarName = OMPBuilder.VarName;                                    \
472   (void)VarName;                                                               \
473   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
474   (void)VarName##Ptr;
475 
476 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
477   {                                                                            \
478     SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
479     Function *F = M.getFunction(_Name);                                        \
480     RTLFunctions.insert(F);                                                    \
481     if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
482       RuntimeFunctionIDMap[F] = _Enum;                                         \
483       auto &RFI = RFIs[_Enum];                                                 \
484       RFI.Kind = _Enum;                                                        \
485       RFI.Name = _Name;                                                        \
486       RFI.IsVarArg = _IsVarArg;                                                \
487       RFI.ReturnType = OMPBuilder._ReturnType;                                 \
488       RFI.ArgumentTypes = std::move(ArgsTypes);                                \
489       RFI.Declaration = F;                                                     \
490       unsigned NumUses = collectUses(RFI);                                     \
491       (void)NumUses;                                                           \
492       LLVM_DEBUG({                                                             \
493         dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
494                << " found\n";                                                  \
495         if (RFI.Declaration)                                                   \
496           dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
497                  << RFI.getNumFunctionsWithUses()                              \
498                  << " different functions.\n";                                 \
499       });                                                                      \
500     }                                                                          \
501   }
502 #include "llvm/Frontend/OpenMP/OMPKinds.def"
503 
504     // Remove the `noinline` attribute from `__kmpc`, `_OMP::` and `omp_`
505     // functions, except if `optnone` is present.
506     for (Function &F : M) {
507       for (StringRef Prefix : {"__kmpc", "_ZN4_OMP", "omp_"})
508         if (F.getName().startswith(Prefix) &&
509             !F.hasFnAttribute(Attribute::OptimizeNone))
510           F.removeFnAttr(Attribute::NoInline);
511     }
512 
513     // TODO: We should attach the attributes defined in OMPKinds.def.
514   }
515 
516   /// Collection of known kernels (\see Kernel) in the module.
517   KernelSet &Kernels;
518 
519   /// Collection of known OpenMP runtime functions..
520   DenseSet<const Function *> RTLFunctions;
521 };
522 
523 template <typename Ty, bool InsertInvalidates = true>
524 struct BooleanStateWithSetVector : public BooleanState {
525   bool contains(const Ty &Elem) const { return Set.contains(Elem); }
526   bool insert(const Ty &Elem) {
527     if (InsertInvalidates)
528       BooleanState::indicatePessimisticFixpoint();
529     return Set.insert(Elem);
530   }
531 
532   const Ty &operator[](int Idx) const { return Set[Idx]; }
533   bool operator==(const BooleanStateWithSetVector &RHS) const {
534     return BooleanState::operator==(RHS) && Set == RHS.Set;
535   }
536   bool operator!=(const BooleanStateWithSetVector &RHS) const {
537     return !(*this == RHS);
538   }
539 
540   bool empty() const { return Set.empty(); }
541   size_t size() const { return Set.size(); }
542 
543   /// "Clamp" this state with \p RHS.
544   BooleanStateWithSetVector &operator^=(const BooleanStateWithSetVector &RHS) {
545     BooleanState::operator^=(RHS);
546     Set.insert(RHS.Set.begin(), RHS.Set.end());
547     return *this;
548   }
549 
550 private:
551   /// A set to keep track of elements.
552   SetVector<Ty> Set;
553 
554 public:
555   typename decltype(Set)::iterator begin() { return Set.begin(); }
556   typename decltype(Set)::iterator end() { return Set.end(); }
557   typename decltype(Set)::const_iterator begin() const { return Set.begin(); }
558   typename decltype(Set)::const_iterator end() const { return Set.end(); }
559 };
560 
561 template <typename Ty, bool InsertInvalidates = true>
562 using BooleanStateWithPtrSetVector =
563     BooleanStateWithSetVector<Ty *, InsertInvalidates>;
564 
565 struct KernelInfoState : AbstractState {
566   /// Flag to track if we reached a fixpoint.
567   bool IsAtFixpoint = false;
568 
569   /// The parallel regions (identified by the outlined parallel functions) that
570   /// can be reached from the associated function.
571   BooleanStateWithPtrSetVector<Function, /* InsertInvalidates */ false>
572       ReachedKnownParallelRegions;
573 
574   /// State to track what parallel region we might reach.
575   BooleanStateWithPtrSetVector<CallBase> ReachedUnknownParallelRegions;
576 
577   /// State to track if we are in SPMD-mode, assumed or know, and why we decided
578   /// we cannot be. If it is assumed, then RequiresFullRuntime should also be
579   /// false.
580   BooleanStateWithPtrSetVector<Instruction, false> SPMDCompatibilityTracker;
581 
582   /// The __kmpc_target_init call in this kernel, if any. If we find more than
583   /// one we abort as the kernel is malformed.
584   CallBase *KernelInitCB = nullptr;
585 
586   /// The __kmpc_target_deinit call in this kernel, if any. If we find more than
587   /// one we abort as the kernel is malformed.
588   CallBase *KernelDeinitCB = nullptr;
589 
590   /// Flag to indicate if the associated function is a kernel entry.
591   bool IsKernelEntry = false;
592 
593   /// State to track what kernel entries can reach the associated function.
594   BooleanStateWithPtrSetVector<Function, false> ReachingKernelEntries;
595 
596   /// State to indicate if we can track parallel level of the associated
597   /// function. We will give up tracking if we encounter unknown caller or the
598   /// caller is __kmpc_parallel_51.
599   BooleanStateWithSetVector<uint8_t> ParallelLevels;
600 
601   /// Abstract State interface
602   ///{
603 
604   KernelInfoState() = default;
605   KernelInfoState(bool BestState) {
606     if (!BestState)
607       indicatePessimisticFixpoint();
608   }
609 
610   /// See AbstractState::isValidState(...)
611   bool isValidState() const override { return true; }
612 
613   /// See AbstractState::isAtFixpoint(...)
614   bool isAtFixpoint() const override { return IsAtFixpoint; }
615 
616   /// See AbstractState::indicatePessimisticFixpoint(...)
617   ChangeStatus indicatePessimisticFixpoint() override {
618     IsAtFixpoint = true;
619     ReachingKernelEntries.indicatePessimisticFixpoint();
620     SPMDCompatibilityTracker.indicatePessimisticFixpoint();
621     ReachedKnownParallelRegions.indicatePessimisticFixpoint();
622     ReachedUnknownParallelRegions.indicatePessimisticFixpoint();
623     return ChangeStatus::CHANGED;
624   }
625 
626   /// See AbstractState::indicateOptimisticFixpoint(...)
627   ChangeStatus indicateOptimisticFixpoint() override {
628     IsAtFixpoint = true;
629     ReachingKernelEntries.indicateOptimisticFixpoint();
630     SPMDCompatibilityTracker.indicateOptimisticFixpoint();
631     ReachedKnownParallelRegions.indicateOptimisticFixpoint();
632     ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
633     return ChangeStatus::UNCHANGED;
634   }
635 
636   /// Return the assumed state
637   KernelInfoState &getAssumed() { return *this; }
638   const KernelInfoState &getAssumed() const { return *this; }
639 
640   bool operator==(const KernelInfoState &RHS) const {
641     if (SPMDCompatibilityTracker != RHS.SPMDCompatibilityTracker)
642       return false;
643     if (ReachedKnownParallelRegions != RHS.ReachedKnownParallelRegions)
644       return false;
645     if (ReachedUnknownParallelRegions != RHS.ReachedUnknownParallelRegions)
646       return false;
647     if (ReachingKernelEntries != RHS.ReachingKernelEntries)
648       return false;
649     return true;
650   }
651 
652   /// Returns true if this kernel contains any OpenMP parallel regions.
653   bool mayContainParallelRegion() {
654     return !ReachedKnownParallelRegions.empty() ||
655            !ReachedUnknownParallelRegions.empty();
656   }
657 
658   /// Return empty set as the best state of potential values.
659   static KernelInfoState getBestState() { return KernelInfoState(true); }
660 
661   static KernelInfoState getBestState(KernelInfoState &KIS) {
662     return getBestState();
663   }
664 
665   /// Return full set as the worst state of potential values.
666   static KernelInfoState getWorstState() { return KernelInfoState(false); }
667 
668   /// "Clamp" this state with \p KIS.
669   KernelInfoState operator^=(const KernelInfoState &KIS) {
670     // Do not merge two different _init and _deinit call sites.
671     if (KIS.KernelInitCB) {
672       if (KernelInitCB && KernelInitCB != KIS.KernelInitCB)
673         llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
674                          "assumptions.");
675       KernelInitCB = KIS.KernelInitCB;
676     }
677     if (KIS.KernelDeinitCB) {
678       if (KernelDeinitCB && KernelDeinitCB != KIS.KernelDeinitCB)
679         llvm_unreachable("Kernel that calls another kernel violates OpenMP-Opt "
680                          "assumptions.");
681       KernelDeinitCB = KIS.KernelDeinitCB;
682     }
683     SPMDCompatibilityTracker ^= KIS.SPMDCompatibilityTracker;
684     ReachedKnownParallelRegions ^= KIS.ReachedKnownParallelRegions;
685     ReachedUnknownParallelRegions ^= KIS.ReachedUnknownParallelRegions;
686     return *this;
687   }
688 
689   KernelInfoState operator&=(const KernelInfoState &KIS) {
690     return (*this ^= KIS);
691   }
692 
693   ///}
694 };
695 
696 /// Used to map the values physically (in the IR) stored in an offload
697 /// array, to a vector in memory.
698 struct OffloadArray {
699   /// Physical array (in the IR).
700   AllocaInst *Array = nullptr;
701   /// Mapped values.
702   SmallVector<Value *, 8> StoredValues;
703   /// Last stores made in the offload array.
704   SmallVector<StoreInst *, 8> LastAccesses;
705 
706   OffloadArray() = default;
707 
708   /// Initializes the OffloadArray with the values stored in \p Array before
709   /// instruction \p Before is reached. Returns false if the initialization
710   /// fails.
711   /// This MUST be used immediately after the construction of the object.
712   bool initialize(AllocaInst &Array, Instruction &Before) {
713     if (!Array.getAllocatedType()->isArrayTy())
714       return false;
715 
716     if (!getValues(Array, Before))
717       return false;
718 
719     this->Array = &Array;
720     return true;
721   }
722 
723   static const unsigned DeviceIDArgNum = 1;
724   static const unsigned BasePtrsArgNum = 3;
725   static const unsigned PtrsArgNum = 4;
726   static const unsigned SizesArgNum = 5;
727 
728 private:
729   /// Traverses the BasicBlock where \p Array is, collecting the stores made to
730   /// \p Array, leaving StoredValues with the values stored before the
731   /// instruction \p Before is reached.
732   bool getValues(AllocaInst &Array, Instruction &Before) {
733     // Initialize container.
734     const uint64_t NumValues = Array.getAllocatedType()->getArrayNumElements();
735     StoredValues.assign(NumValues, nullptr);
736     LastAccesses.assign(NumValues, nullptr);
737 
738     // TODO: This assumes the instruction \p Before is in the same
739     //  BasicBlock as Array. Make it general, for any control flow graph.
740     BasicBlock *BB = Array.getParent();
741     if (BB != Before.getParent())
742       return false;
743 
744     const DataLayout &DL = Array.getModule()->getDataLayout();
745     const unsigned int PointerSize = DL.getPointerSize();
746 
747     for (Instruction &I : *BB) {
748       if (&I == &Before)
749         break;
750 
751       if (!isa<StoreInst>(&I))
752         continue;
753 
754       auto *S = cast<StoreInst>(&I);
755       int64_t Offset = -1;
756       auto *Dst =
757           GetPointerBaseWithConstantOffset(S->getPointerOperand(), Offset, DL);
758       if (Dst == &Array) {
759         int64_t Idx = Offset / PointerSize;
760         StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
761         LastAccesses[Idx] = S;
762       }
763     }
764 
765     return isFilled();
766   }
767 
768   /// Returns true if all values in StoredValues and
769   /// LastAccesses are not nullptrs.
770   bool isFilled() {
771     const unsigned NumValues = StoredValues.size();
772     for (unsigned I = 0; I < NumValues; ++I) {
773       if (!StoredValues[I] || !LastAccesses[I])
774         return false;
775     }
776 
777     return true;
778   }
779 };
780 
781 struct OpenMPOpt {
782 
783   using OptimizationRemarkGetter =
784       function_ref<OptimizationRemarkEmitter &(Function *)>;
785 
786   OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
787             OptimizationRemarkGetter OREGetter,
788             OMPInformationCache &OMPInfoCache, Attributor &A)
789       : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
790         OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}
791 
792   /// Check if any remarks are enabled for openmp-opt
793   bool remarksEnabled() {
794     auto &Ctx = M.getContext();
795     return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
796   }
797 
798   /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
799   bool run(bool IsModulePass) {
800     if (SCC.empty())
801       return false;
802 
803     bool Changed = false;
804 
805     LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
806                       << " functions in a slice with "
807                       << OMPInfoCache.ModuleSlice.size() << " functions\n");
808 
809     if (IsModulePass) {
810       Changed |= runAttributor(IsModulePass);
811 
812       // Recollect uses, in case Attributor deleted any.
813       OMPInfoCache.recollectUses();
814 
815       // TODO: This should be folded into buildCustomStateMachine.
816       Changed |= rewriteDeviceCodeStateMachine();
817 
818       if (remarksEnabled())
819         analysisGlobalization();
820 
821       Changed |= eliminateBarriers();
822     } else {
823       if (PrintICVValues)
824         printICVs();
825       if (PrintOpenMPKernels)
826         printKernels();
827 
828       Changed |= runAttributor(IsModulePass);
829 
830       // Recollect uses, in case Attributor deleted any.
831       OMPInfoCache.recollectUses();
832 
833       Changed |= deleteParallelRegions();
834 
835       if (HideMemoryTransferLatency)
836         Changed |= hideMemTransfersLatency();
837       Changed |= deduplicateRuntimeCalls();
838       if (EnableParallelRegionMerging) {
839         if (mergeParallelRegions()) {
840           deduplicateRuntimeCalls();
841           Changed = true;
842         }
843       }
844 
845       Changed |= eliminateBarriers();
846     }
847 
848     return Changed;
849   }
850 
851   /// Print initial ICV values for testing.
852   /// FIXME: This should be done from the Attributor once it is added.
853   void printICVs() const {
854     InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
855                                  ICV_proc_bind};
856 
857     for (Function *F : OMPInfoCache.ModuleSlice) {
858       for (auto ICV : ICVs) {
859         auto ICVInfo = OMPInfoCache.ICVs[ICV];
860         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
861           return ORA << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
862                      << " Value: "
863                      << (ICVInfo.InitValue
864                              ? toString(ICVInfo.InitValue->getValue(), 10, true)
865                              : "IMPLEMENTATION_DEFINED");
866         };
867 
868         emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPICVTracker", Remark);
869       }
870     }
871   }
872 
873   /// Print OpenMP GPU kernels for testing.
874   void printKernels() const {
875     for (Function *F : SCC) {
876       if (!OMPInfoCache.Kernels.count(F))
877         continue;
878 
879       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
880         return ORA << "OpenMP GPU kernel "
881                    << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
882       };
883 
884       emitRemark<OptimizationRemarkAnalysis>(F, "OpenMPGPU", Remark);
885     }
886   }
887 
888   /// Return the call if \p U is a callee use in a regular call. If \p RFI is
889   /// given it has to be the callee or a nullptr is returned.
890   static CallInst *getCallIfRegularCall(
891       Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
892     CallInst *CI = dyn_cast<CallInst>(U.getUser());
893     if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
894         (!RFI ||
895          (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
896       return CI;
897     return nullptr;
898   }
899 
900   /// Return the call if \p V is a regular call. If \p RFI is given it has to be
901   /// the callee or a nullptr is returned.
902   static CallInst *getCallIfRegularCall(
903       Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
904     CallInst *CI = dyn_cast<CallInst>(&V);
905     if (CI && !CI->hasOperandBundles() &&
906         (!RFI ||
907          (RFI->Declaration && CI->getCalledFunction() == RFI->Declaration)))
908       return CI;
909     return nullptr;
910   }
911 
912 private:
913   /// Merge parallel regions when it is safe.
914   bool mergeParallelRegions() {
915     const unsigned CallbackCalleeOperand = 2;
916     const unsigned CallbackFirstArgOperand = 3;
917     using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
918 
919     // Check if there are any __kmpc_fork_call calls to merge.
920     OMPInformationCache::RuntimeFunctionInfo &RFI =
921         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
922 
923     if (!RFI.Declaration)
924       return false;
925 
926     // Unmergable calls that prevent merging a parallel region.
927     OMPInformationCache::RuntimeFunctionInfo UnmergableCallsInfo[] = {
928         OMPInfoCache.RFIs[OMPRTL___kmpc_push_proc_bind],
929         OMPInfoCache.RFIs[OMPRTL___kmpc_push_num_threads],
930     };
931 
932     bool Changed = false;
933     LoopInfo *LI = nullptr;
934     DominatorTree *DT = nullptr;
935 
936     SmallDenseMap<BasicBlock *, SmallPtrSet<Instruction *, 4>> BB2PRMap;
937 
938     BasicBlock *StartBB = nullptr, *EndBB = nullptr;
939     auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
940                          BasicBlock &ContinuationIP) {
941       BasicBlock *CGStartBB = CodeGenIP.getBlock();
942       BasicBlock *CGEndBB =
943           SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
944       assert(StartBB != nullptr && "StartBB should not be null");
945       CGStartBB->getTerminator()->setSuccessor(0, StartBB);
946       assert(EndBB != nullptr && "EndBB should not be null");
947       EndBB->getTerminator()->setSuccessor(0, CGEndBB);
948     };
949 
950     auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &,
951                       Value &Inner, Value *&ReplacementValue) -> InsertPointTy {
952       ReplacementValue = &Inner;
953       return CodeGenIP;
954     };
955 
956     auto FiniCB = [&](InsertPointTy CodeGenIP) {};
957 
958     /// Create a sequential execution region within a merged parallel region,
959     /// encapsulated in a master construct with a barrier for synchronization.
960     auto CreateSequentialRegion = [&](Function *OuterFn,
961                                       BasicBlock *OuterPredBB,
962                                       Instruction *SeqStartI,
963                                       Instruction *SeqEndI) {
964       // Isolate the instructions of the sequential region to a separate
965       // block.
966       BasicBlock *ParentBB = SeqStartI->getParent();
967       BasicBlock *SeqEndBB =
968           SplitBlock(ParentBB, SeqEndI->getNextNode(), DT, LI);
969       BasicBlock *SeqAfterBB =
970           SplitBlock(SeqEndBB, &*SeqEndBB->getFirstInsertionPt(), DT, LI);
971       BasicBlock *SeqStartBB =
972           SplitBlock(ParentBB, SeqStartI, DT, LI, nullptr, "seq.par.merged");
973 
974       assert(ParentBB->getUniqueSuccessor() == SeqStartBB &&
975              "Expected a different CFG");
976       const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
977       ParentBB->getTerminator()->eraseFromParent();
978 
979       auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
980                            BasicBlock &ContinuationIP) {
981         BasicBlock *CGStartBB = CodeGenIP.getBlock();
982         BasicBlock *CGEndBB =
983             SplitBlock(CGStartBB, &*CodeGenIP.getPoint(), DT, LI);
984         assert(SeqStartBB != nullptr && "SeqStartBB should not be null");
985         CGStartBB->getTerminator()->setSuccessor(0, SeqStartBB);
986         assert(SeqEndBB != nullptr && "SeqEndBB should not be null");
987         SeqEndBB->getTerminator()->setSuccessor(0, CGEndBB);
988       };
989       auto FiniCB = [&](InsertPointTy CodeGenIP) {};
990 
991       // Find outputs from the sequential region to outside users and
992       // broadcast their values to them.
993       for (Instruction &I : *SeqStartBB) {
994         SmallPtrSet<Instruction *, 4> OutsideUsers;
995         for (User *Usr : I.users()) {
996           Instruction &UsrI = *cast<Instruction>(Usr);
997           // Ignore outputs to LT intrinsics, code extraction for the merged
998           // parallel region will fix them.
999           if (UsrI.isLifetimeStartOrEnd())
1000             continue;
1001 
1002           if (UsrI.getParent() != SeqStartBB)
1003             OutsideUsers.insert(&UsrI);
1004         }
1005 
1006         if (OutsideUsers.empty())
1007           continue;
1008 
1009         // Emit an alloca in the outer region to store the broadcasted
1010         // value.
1011         const DataLayout &DL = M.getDataLayout();
1012         AllocaInst *AllocaI = new AllocaInst(
1013             I.getType(), DL.getAllocaAddrSpace(), nullptr,
1014             I.getName() + ".seq.output.alloc", &OuterFn->front().front());
1015 
1016         // Emit a store instruction in the sequential BB to update the
1017         // value.
1018         new StoreInst(&I, AllocaI, SeqStartBB->getTerminator());
1019 
1020         // Emit a load instruction and replace the use of the output value
1021         // with it.
1022         for (Instruction *UsrI : OutsideUsers) {
1023           LoadInst *LoadI = new LoadInst(
1024               I.getType(), AllocaI, I.getName() + ".seq.output.load", UsrI);
1025           UsrI->replaceUsesOfWith(&I, LoadI);
1026         }
1027       }
1028 
1029       OpenMPIRBuilder::LocationDescription Loc(
1030           InsertPointTy(ParentBB, ParentBB->end()), DL);
1031       InsertPointTy SeqAfterIP =
1032           OMPInfoCache.OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB);
1033 
1034       OMPInfoCache.OMPBuilder.createBarrier(SeqAfterIP, OMPD_parallel);
1035 
1036       BranchInst::Create(SeqAfterBB, SeqAfterIP.getBlock());
1037 
1038       LLVM_DEBUG(dbgs() << TAG << "After sequential inlining " << *OuterFn
1039                         << "\n");
1040     };
1041 
1042     // Helper to merge the __kmpc_fork_call calls in MergableCIs. They are all
1043     // contained in BB and only separated by instructions that can be
1044     // redundantly executed in parallel. The block BB is split before the first
1045     // call (in MergableCIs) and after the last so the entire region we merge
1046     // into a single parallel region is contained in a single basic block
1047     // without any other instructions. We use the OpenMPIRBuilder to outline
1048     // that block and call the resulting function via __kmpc_fork_call.
1049     auto Merge = [&](const SmallVectorImpl<CallInst *> &MergableCIs,
1050                      BasicBlock *BB) {
1051       // TODO: Change the interface to allow single CIs expanded, e.g, to
1052       // include an outer loop.
1053       assert(MergableCIs.size() > 1 && "Assumed multiple mergable CIs");
1054 
1055       auto Remark = [&](OptimizationRemark OR) {
1056         OR << "Parallel region merged with parallel region"
1057            << (MergableCIs.size() > 2 ? "s" : "") << " at ";
1058         for (auto *CI : llvm::drop_begin(MergableCIs)) {
1059           OR << ore::NV("OpenMPParallelMerge", CI->getDebugLoc());
1060           if (CI != MergableCIs.back())
1061             OR << ", ";
1062         }
1063         return OR << ".";
1064       };
1065 
1066       emitRemark<OptimizationRemark>(MergableCIs.front(), "OMP150", Remark);
1067 
1068       Function *OriginalFn = BB->getParent();
1069       LLVM_DEBUG(dbgs() << TAG << "Merge " << MergableCIs.size()
1070                         << " parallel regions in " << OriginalFn->getName()
1071                         << "\n");
1072 
1073       // Isolate the calls to merge in a separate block.
1074       EndBB = SplitBlock(BB, MergableCIs.back()->getNextNode(), DT, LI);
1075       BasicBlock *AfterBB =
1076           SplitBlock(EndBB, &*EndBB->getFirstInsertionPt(), DT, LI);
1077       StartBB = SplitBlock(BB, MergableCIs.front(), DT, LI, nullptr,
1078                            "omp.par.merged");
1079 
1080       assert(BB->getUniqueSuccessor() == StartBB && "Expected a different CFG");
1081       const DebugLoc DL = BB->getTerminator()->getDebugLoc();
1082       BB->getTerminator()->eraseFromParent();
1083 
1084       // Create sequential regions for sequential instructions that are
1085       // in-between mergable parallel regions.
1086       for (auto *It = MergableCIs.begin(), *End = MergableCIs.end() - 1;
1087            It != End; ++It) {
1088         Instruction *ForkCI = *It;
1089         Instruction *NextForkCI = *(It + 1);
1090 
1091         // Continue if there are not in-between instructions.
1092         if (ForkCI->getNextNode() == NextForkCI)
1093           continue;
1094 
1095         CreateSequentialRegion(OriginalFn, BB, ForkCI->getNextNode(),
1096                                NextForkCI->getPrevNode());
1097       }
1098 
1099       OpenMPIRBuilder::LocationDescription Loc(InsertPointTy(BB, BB->end()),
1100                                                DL);
1101       IRBuilder<>::InsertPoint AllocaIP(
1102           &OriginalFn->getEntryBlock(),
1103           OriginalFn->getEntryBlock().getFirstInsertionPt());
1104       // Create the merged parallel region with default proc binding, to
1105       // avoid overriding binding settings, and without explicit cancellation.
1106       InsertPointTy AfterIP = OMPInfoCache.OMPBuilder.createParallel(
1107           Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, nullptr, nullptr,
1108           OMP_PROC_BIND_default, /* IsCancellable */ false);
1109       BranchInst::Create(AfterBB, AfterIP.getBlock());
1110 
1111       // Perform the actual outlining.
1112       OMPInfoCache.OMPBuilder.finalize(OriginalFn);
1113 
1114       Function *OutlinedFn = MergableCIs.front()->getCaller();
1115 
1116       // Replace the __kmpc_fork_call calls with direct calls to the outlined
1117       // callbacks.
1118       SmallVector<Value *, 8> Args;
1119       for (auto *CI : MergableCIs) {
1120         Value *Callee =
1121             CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts();
1122         FunctionType *FT =
1123             cast<FunctionType>(Callee->getType()->getPointerElementType());
1124         Args.clear();
1125         Args.push_back(OutlinedFn->getArg(0));
1126         Args.push_back(OutlinedFn->getArg(1));
1127         for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
1128              ++U)
1129           Args.push_back(CI->getArgOperand(U));
1130 
1131         CallInst *NewCI = CallInst::Create(FT, Callee, Args, "", CI);
1132         if (CI->getDebugLoc())
1133           NewCI->setDebugLoc(CI->getDebugLoc());
1134 
1135         // Forward parameter attributes from the callback to the callee.
1136         for (unsigned U = CallbackFirstArgOperand, E = CI->arg_size(); U < E;
1137              ++U)
1138           for (const Attribute &A : CI->getAttributes().getParamAttrs(U))
1139             NewCI->addParamAttr(
1140                 U - (CallbackFirstArgOperand - CallbackCalleeOperand), A);
1141 
1142         // Emit an explicit barrier to replace the implicit fork-join barrier.
1143         if (CI != MergableCIs.back()) {
1144           // TODO: Remove barrier if the merged parallel region includes the
1145           // 'nowait' clause.
1146           OMPInfoCache.OMPBuilder.createBarrier(
1147               InsertPointTy(NewCI->getParent(),
1148                             NewCI->getNextNode()->getIterator()),
1149               OMPD_parallel);
1150         }
1151 
1152         CI->eraseFromParent();
1153       }
1154 
1155       assert(OutlinedFn != OriginalFn && "Outlining failed");
1156       CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn);
1157       CGUpdater.reanalyzeFunction(*OriginalFn);
1158 
1159       NumOpenMPParallelRegionsMerged += MergableCIs.size();
1160 
1161       return true;
1162     };
1163 
1164     // Helper function that identifes sequences of
1165     // __kmpc_fork_call uses in a basic block.
1166     auto DetectPRsCB = [&](Use &U, Function &F) {
1167       CallInst *CI = getCallIfRegularCall(U, &RFI);
1168       BB2PRMap[CI->getParent()].insert(CI);
1169 
1170       return false;
1171     };
1172 
1173     BB2PRMap.clear();
1174     RFI.foreachUse(SCC, DetectPRsCB);
1175     SmallVector<SmallVector<CallInst *, 4>, 4> MergableCIsVector;
1176     // Find mergable parallel regions within a basic block that are
1177     // safe to merge, that is any in-between instructions can safely
1178     // execute in parallel after merging.
1179     // TODO: support merging across basic-blocks.
1180     for (auto &It : BB2PRMap) {
1181       auto &CIs = It.getSecond();
1182       if (CIs.size() < 2)
1183         continue;
1184 
1185       BasicBlock *BB = It.getFirst();
1186       SmallVector<CallInst *, 4> MergableCIs;
1187 
1188       /// Returns true if the instruction is mergable, false otherwise.
1189       /// A terminator instruction is unmergable by definition since merging
1190       /// works within a BB. Instructions before the mergable region are
1191       /// mergable if they are not calls to OpenMP runtime functions that may
1192       /// set different execution parameters for subsequent parallel regions.
1193       /// Instructions in-between parallel regions are mergable if they are not
1194       /// calls to any non-intrinsic function since that may call a non-mergable
1195       /// OpenMP runtime function.
1196       auto IsMergable = [&](Instruction &I, bool IsBeforeMergableRegion) {
1197         // We do not merge across BBs, hence return false (unmergable) if the
1198         // instruction is a terminator.
1199         if (I.isTerminator())
1200           return false;
1201 
1202         if (!isa<CallInst>(&I))
1203           return true;
1204 
1205         CallInst *CI = cast<CallInst>(&I);
1206         if (IsBeforeMergableRegion) {
1207           Function *CalledFunction = CI->getCalledFunction();
1208           if (!CalledFunction)
1209             return false;
1210           // Return false (unmergable) if the call before the parallel
1211           // region calls an explicit affinity (proc_bind) or number of
1212           // threads (num_threads) compiler-generated function. Those settings
1213           // may be incompatible with following parallel regions.
1214           // TODO: ICV tracking to detect compatibility.
1215           for (const auto &RFI : UnmergableCallsInfo) {
1216             if (CalledFunction == RFI.Declaration)
1217               return false;
1218           }
1219         } else {
1220           // Return false (unmergable) if there is a call instruction
1221           // in-between parallel regions when it is not an intrinsic. It
1222           // may call an unmergable OpenMP runtime function in its callpath.
1223           // TODO: Keep track of possible OpenMP calls in the callpath.
1224           if (!isa<IntrinsicInst>(CI))
1225             return false;
1226         }
1227 
1228         return true;
1229       };
1230       // Find maximal number of parallel region CIs that are safe to merge.
1231       for (auto It = BB->begin(), End = BB->end(); It != End;) {
1232         Instruction &I = *It;
1233         ++It;
1234 
1235         if (CIs.count(&I)) {
1236           MergableCIs.push_back(cast<CallInst>(&I));
1237           continue;
1238         }
1239 
1240         // Continue expanding if the instruction is mergable.
1241         if (IsMergable(I, MergableCIs.empty()))
1242           continue;
1243 
1244         // Forward the instruction iterator to skip the next parallel region
1245         // since there is an unmergable instruction which can affect it.
1246         for (; It != End; ++It) {
1247           Instruction &SkipI = *It;
1248           if (CIs.count(&SkipI)) {
1249             LLVM_DEBUG(dbgs() << TAG << "Skip parallel region " << SkipI
1250                               << " due to " << I << "\n");
1251             ++It;
1252             break;
1253           }
1254         }
1255 
1256         // Store mergable regions found.
1257         if (MergableCIs.size() > 1) {
1258           MergableCIsVector.push_back(MergableCIs);
1259           LLVM_DEBUG(dbgs() << TAG << "Found " << MergableCIs.size()
1260                             << " parallel regions in block " << BB->getName()
1261                             << " of function " << BB->getParent()->getName()
1262                             << "\n";);
1263         }
1264 
1265         MergableCIs.clear();
1266       }
1267 
1268       if (!MergableCIsVector.empty()) {
1269         Changed = true;
1270 
1271         for (auto &MergableCIs : MergableCIsVector)
1272           Merge(MergableCIs, BB);
1273         MergableCIsVector.clear();
1274       }
1275     }
1276 
1277     if (Changed) {
1278       /// Re-collect use for fork calls, emitted barrier calls, and
1279       /// any emitted master/end_master calls.
1280       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_fork_call);
1281       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_barrier);
1282       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_master);
1283       OMPInfoCache.recollectUsesForFunction(OMPRTL___kmpc_end_master);
1284     }
1285 
1286     return Changed;
1287   }
1288 
1289   /// Try to delete parallel regions if possible.
1290   bool deleteParallelRegions() {
1291     const unsigned CallbackCalleeOperand = 2;
1292 
1293     OMPInformationCache::RuntimeFunctionInfo &RFI =
1294         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
1295 
1296     if (!RFI.Declaration)
1297       return false;
1298 
1299     bool Changed = false;
1300     auto DeleteCallCB = [&](Use &U, Function &) {
1301       CallInst *CI = getCallIfRegularCall(U);
1302       if (!CI)
1303         return false;
1304       auto *Fn = dyn_cast<Function>(
1305           CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
1306       if (!Fn)
1307         return false;
1308       if (!Fn->onlyReadsMemory())
1309         return false;
1310       if (!Fn->hasFnAttribute(Attribute::WillReturn))
1311         return false;
1312 
1313       LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
1314                         << CI->getCaller()->getName() << "\n");
1315 
1316       auto Remark = [&](OptimizationRemark OR) {
1317         return OR << "Removing parallel region with no side-effects.";
1318       };
1319       emitRemark<OptimizationRemark>(CI, "OMP160", Remark);
1320 
1321       CGUpdater.removeCallSite(*CI);
1322       CI->eraseFromParent();
1323       Changed = true;
1324       ++NumOpenMPParallelRegionsDeleted;
1325       return true;
1326     };
1327 
1328     RFI.foreachUse(SCC, DeleteCallCB);
1329 
1330     return Changed;
1331   }
1332 
1333   /// Try to eliminate runtime calls by reusing existing ones.
1334   bool deduplicateRuntimeCalls() {
1335     bool Changed = false;
1336 
1337     RuntimeFunction DeduplicableRuntimeCallIDs[] = {
1338         OMPRTL_omp_get_num_threads,
1339         OMPRTL_omp_in_parallel,
1340         OMPRTL_omp_get_cancellation,
1341         OMPRTL_omp_get_thread_limit,
1342         OMPRTL_omp_get_supported_active_levels,
1343         OMPRTL_omp_get_level,
1344         OMPRTL_omp_get_ancestor_thread_num,
1345         OMPRTL_omp_get_team_size,
1346         OMPRTL_omp_get_active_level,
1347         OMPRTL_omp_in_final,
1348         OMPRTL_omp_get_proc_bind,
1349         OMPRTL_omp_get_num_places,
1350         OMPRTL_omp_get_num_procs,
1351         OMPRTL_omp_get_place_num,
1352         OMPRTL_omp_get_partition_num_places,
1353         OMPRTL_omp_get_partition_place_nums};
1354 
1355     // Global-tid is handled separately.
1356     SmallSetVector<Value *, 16> GTIdArgs;
1357     collectGlobalThreadIdArguments(GTIdArgs);
1358     LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
1359                       << " global thread ID arguments\n");
1360 
1361     for (Function *F : SCC) {
1362       for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
1363         Changed |= deduplicateRuntimeCalls(
1364             *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);
1365 
1366       // __kmpc_global_thread_num is special as we can replace it with an
1367       // argument in enough cases to make it worth trying.
1368       Value *GTIdArg = nullptr;
1369       for (Argument &Arg : F->args())
1370         if (GTIdArgs.count(&Arg)) {
1371           GTIdArg = &Arg;
1372           break;
1373         }
1374       Changed |= deduplicateRuntimeCalls(
1375           *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
1376     }
1377 
1378     return Changed;
1379   }
1380 
1381   /// Tries to hide the latency of runtime calls that involve host to
1382   /// device memory transfers by splitting them into their "issue" and "wait"
1383   /// versions. The "issue" is moved upwards as much as possible. The "wait" is
1384   /// moved downards as much as possible. The "issue" issues the memory transfer
1385   /// asynchronously, returning a handle. The "wait" waits in the returned
1386   /// handle for the memory transfer to finish.
1387   bool hideMemTransfersLatency() {
1388     auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
1389     bool Changed = false;
1390     auto SplitMemTransfers = [&](Use &U, Function &Decl) {
1391       auto *RTCall = getCallIfRegularCall(U, &RFI);
1392       if (!RTCall)
1393         return false;
1394 
1395       OffloadArray OffloadArrays[3];
1396       if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
1397         return false;
1398 
1399       LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));
1400 
1401       // TODO: Check if can be moved upwards.
1402       bool WasSplit = false;
1403       Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
1404       if (WaitMovementPoint)
1405         WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);
1406 
1407       Changed |= WasSplit;
1408       return WasSplit;
1409     };
1410     RFI.foreachUse(SCC, SplitMemTransfers);
1411 
1412     return Changed;
1413   }
1414 
1415   /// Eliminates redundant, aligned barriers in OpenMP offloaded kernels.
1416   /// TODO: Make this an AA and expand it to work across blocks and functions.
1417   bool eliminateBarriers() {
1418     bool Changed = false;
1419 
1420     if (DisableOpenMPOptBarrierElimination)
1421       return /*Changed=*/false;
1422 
1423     if (OMPInfoCache.Kernels.empty())
1424       return /*Changed=*/false;
1425 
1426     enum ImplicitBarrierType { IBT_ENTRY, IBT_EXIT };
1427 
1428     class BarrierInfo {
1429       Instruction *I;
1430       enum ImplicitBarrierType Type;
1431 
1432     public:
1433       BarrierInfo(enum ImplicitBarrierType Type) : I(nullptr), Type(Type) {}
1434       BarrierInfo(Instruction &I) : I(&I) {}
1435 
1436       bool isImplicit() { return !I; }
1437 
1438       bool isImplicitEntry() { return isImplicit() && Type == IBT_ENTRY; }
1439 
1440       bool isImplicitExit() { return isImplicit() && Type == IBT_EXIT; }
1441 
1442       Instruction *getInstruction() { return I; }
1443     };
1444 
1445     for (Function *Kernel : OMPInfoCache.Kernels) {
1446       for (BasicBlock &BB : *Kernel) {
1447         SmallVector<BarrierInfo, 8> BarriersInBlock;
1448         SmallPtrSet<Instruction *, 8> BarriersToBeDeleted;
1449 
1450         // Add the kernel entry implicit barrier.
1451         if (&Kernel->getEntryBlock() == &BB)
1452           BarriersInBlock.push_back(IBT_ENTRY);
1453 
1454         // Find implicit and explicit aligned barriers in the same basic block.
1455         for (Instruction &I : BB) {
1456           if (isa<ReturnInst>(I)) {
1457             // Add the implicit barrier when exiting the kernel.
1458             BarriersInBlock.push_back(IBT_EXIT);
1459             continue;
1460           }
1461           CallBase *CB = dyn_cast<CallBase>(&I);
1462           if (!CB)
1463             continue;
1464 
1465           auto IsAlignBarrierCB = [&](CallBase &CB) {
1466             switch (CB.getIntrinsicID()) {
1467             case Intrinsic::nvvm_barrier0:
1468             case Intrinsic::nvvm_barrier0_and:
1469             case Intrinsic::nvvm_barrier0_or:
1470             case Intrinsic::nvvm_barrier0_popc:
1471               return true;
1472             default:
1473               break;
1474             }
1475             return hasAssumption(CB,
1476                                  KnownAssumptionString("ompx_aligned_barrier"));
1477           };
1478 
1479           if (IsAlignBarrierCB(*CB)) {
1480             // Add an explicit aligned barrier.
1481             BarriersInBlock.push_back(I);
1482           }
1483         }
1484 
1485         if (BarriersInBlock.size() <= 1)
1486           continue;
1487 
1488         // A barrier in a barrier pair is removeable if all instructions
1489         // between the barriers in the pair are side-effect free modulo the
1490         // barrier operation.
1491         auto IsBarrierRemoveable = [&Kernel](BarrierInfo *StartBI,
1492                                              BarrierInfo *EndBI) {
1493           assert(
1494               !StartBI->isImplicitExit() &&
1495               "Expected start barrier to be other than a kernel exit barrier");
1496           assert(
1497               !EndBI->isImplicitEntry() &&
1498               "Expected end barrier to be other than a kernel entry barrier");
1499           // If StarBI instructions is null then this the implicit
1500           // kernel entry barrier, so iterate from the first instruction in the
1501           // entry block.
1502           Instruction *I = (StartBI->isImplicitEntry())
1503                                ? &Kernel->getEntryBlock().front()
1504                                : StartBI->getInstruction()->getNextNode();
1505           assert(I && "Expected non-null start instruction");
1506           Instruction *E = (EndBI->isImplicitExit())
1507                                ? I->getParent()->getTerminator()
1508                                : EndBI->getInstruction();
1509           assert(E && "Expected non-null end instruction");
1510 
1511           for (; I != E; I = I->getNextNode()) {
1512             if (!I->mayHaveSideEffects() && !I->mayReadFromMemory())
1513               continue;
1514 
1515             auto IsPotentiallyAffectedByBarrier =
1516                 [](Optional<MemoryLocation> Loc) {
1517                   const Value *Obj = (Loc && Loc->Ptr)
1518                                          ? getUnderlyingObject(Loc->Ptr)
1519                                          : nullptr;
1520                   if (!Obj) {
1521                     LLVM_DEBUG(
1522                         dbgs()
1523                         << "Access to unknown location requires barriers\n");
1524                     return true;
1525                   }
1526                   if (isa<UndefValue>(Obj))
1527                     return false;
1528                   if (isa<AllocaInst>(Obj))
1529                     return false;
1530                   if (auto *GV = dyn_cast<GlobalVariable>(Obj)) {
1531                     if (GV->isConstant())
1532                       return false;
1533                     if (GV->isThreadLocal())
1534                       return false;
1535                     if (GV->getAddressSpace() == (int)AddressSpace::Local)
1536                       return false;
1537                     if (GV->getAddressSpace() == (int)AddressSpace::Constant)
1538                       return false;
1539                   }
1540                   LLVM_DEBUG(dbgs() << "Access to '" << *Obj
1541                                     << "' requires barriers\n");
1542                   return true;
1543                 };
1544 
1545             if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
1546               Optional<MemoryLocation> Loc = MemoryLocation::getForDest(MI);
1547               if (IsPotentiallyAffectedByBarrier(Loc))
1548                 return false;
1549               if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
1550                 Optional<MemoryLocation> Loc =
1551                     MemoryLocation::getForSource(MTI);
1552                 if (IsPotentiallyAffectedByBarrier(Loc))
1553                   return false;
1554               }
1555               continue;
1556             }
1557 
1558             if (auto *LI = dyn_cast<LoadInst>(I))
1559               if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1560                 continue;
1561 
1562             Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
1563             if (IsPotentiallyAffectedByBarrier(Loc))
1564               return false;
1565           }
1566 
1567           return true;
1568         };
1569 
1570         // Iterate barrier pairs and remove an explicit barrier if analysis
1571         // deems it removeable.
1572         for (auto *It = BarriersInBlock.begin(),
1573                   *End = BarriersInBlock.end() - 1;
1574              It != End; ++It) {
1575 
1576           BarrierInfo *StartBI = It;
1577           BarrierInfo *EndBI = (It + 1);
1578 
1579           // Cannot remove when both are implicit barriers, continue.
1580           if (StartBI->isImplicit() && EndBI->isImplicit())
1581             continue;
1582 
1583           if (!IsBarrierRemoveable(StartBI, EndBI))
1584             continue;
1585 
1586           assert(!(StartBI->isImplicit() && EndBI->isImplicit()) &&
1587                  "Expected at least one explicit barrier to remove.");
1588 
1589           // Remove an explicit barrier, check first, then second.
1590           if (!StartBI->isImplicit()) {
1591             LLVM_DEBUG(dbgs() << "Remove start barrier "
1592                               << *StartBI->getInstruction() << "\n");
1593             BarriersToBeDeleted.insert(StartBI->getInstruction());
1594           } else {
1595             LLVM_DEBUG(dbgs() << "Remove end barrier "
1596                               << *EndBI->getInstruction() << "\n");
1597             BarriersToBeDeleted.insert(EndBI->getInstruction());
1598           }
1599         }
1600 
1601         if (BarriersToBeDeleted.empty())
1602           continue;
1603 
1604         Changed = true;
1605         for (Instruction *I : BarriersToBeDeleted) {
1606           ++NumBarriersEliminated;
1607           auto Remark = [&](OptimizationRemark OR) {
1608             return OR << "Redundant barrier eliminated.";
1609           };
1610 
1611           if (EnableVerboseRemarks)
1612             emitRemark<OptimizationRemark>(I, "OMP190", Remark);
1613           I->eraseFromParent();
1614         }
1615       }
1616     }
1617 
1618     return Changed;
1619   }
1620 
1621   void analysisGlobalization() {
1622     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
1623 
1624     auto CheckGlobalization = [&](Use &U, Function &Decl) {
1625       if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
1626         auto Remark = [&](OptimizationRemarkMissed ORM) {
1627           return ORM
1628                  << "Found thread data sharing on the GPU. "
1629                  << "Expect degraded performance due to data globalization.";
1630         };
1631         emitRemark<OptimizationRemarkMissed>(CI, "OMP112", Remark);
1632       }
1633 
1634       return false;
1635     };
1636 
1637     RFI.foreachUse(SCC, CheckGlobalization);
1638   }
1639 
1640   /// Maps the values stored in the offload arrays passed as arguments to
1641   /// \p RuntimeCall into the offload arrays in \p OAs.
1642   bool getValuesInOffloadArrays(CallInst &RuntimeCall,
1643                                 MutableArrayRef<OffloadArray> OAs) {
1644     assert(OAs.size() == 3 && "Need space for three offload arrays!");
1645 
1646     // A runtime call that involves memory offloading looks something like:
1647     // call void @__tgt_target_data_begin_mapper(arg0, arg1,
1648     //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
1649     // ...)
1650     // So, the idea is to access the allocas that allocate space for these
1651     // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
1652     // Therefore:
1653     // i8** %offload_baseptrs.
1654     Value *BasePtrsArg =
1655         RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
1656     // i8** %offload_ptrs.
1657     Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
1658     // i8** %offload_sizes.
1659     Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);
1660 
1661     // Get values stored in **offload_baseptrs.
1662     auto *V = getUnderlyingObject(BasePtrsArg);
1663     if (!isa<AllocaInst>(V))
1664       return false;
1665     auto *BasePtrsArray = cast<AllocaInst>(V);
1666     if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
1667       return false;
1668 
1669     // Get values stored in **offload_baseptrs.
1670     V = getUnderlyingObject(PtrsArg);
1671     if (!isa<AllocaInst>(V))
1672       return false;
1673     auto *PtrsArray = cast<AllocaInst>(V);
1674     if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
1675       return false;
1676 
1677     // Get values stored in **offload_sizes.
1678     V = getUnderlyingObject(SizesArg);
1679     // If it's a [constant] global array don't analyze it.
1680     if (isa<GlobalValue>(V))
1681       return isa<Constant>(V);
1682     if (!isa<AllocaInst>(V))
1683       return false;
1684 
1685     auto *SizesArray = cast<AllocaInst>(V);
1686     if (!OAs[2].initialize(*SizesArray, RuntimeCall))
1687       return false;
1688 
1689     return true;
1690   }
1691 
1692   /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
1693   /// For now this is a way to test that the function getValuesInOffloadArrays
1694   /// is working properly.
1695   /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
1696   void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
1697     assert(OAs.size() == 3 && "There are three offload arrays to debug!");
1698 
1699     LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
1700     std::string ValuesStr;
1701     raw_string_ostream Printer(ValuesStr);
1702     std::string Separator = " --- ";
1703 
1704     for (auto *BP : OAs[0].StoredValues) {
1705       BP->print(Printer);
1706       Printer << Separator;
1707     }
1708     LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
1709     ValuesStr.clear();
1710 
1711     for (auto *P : OAs[1].StoredValues) {
1712       P->print(Printer);
1713       Printer << Separator;
1714     }
1715     LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
1716     ValuesStr.clear();
1717 
1718     for (auto *S : OAs[2].StoredValues) {
1719       S->print(Printer);
1720       Printer << Separator;
1721     }
1722     LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
1723   }
1724 
1725   /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be
1726   /// moved. Returns nullptr if the movement is not possible, or not worth it.
1727   Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
1728     // FIXME: This traverses only the BasicBlock where RuntimeCall is.
1729     //  Make it traverse the CFG.
1730 
1731     Instruction *CurrentI = &RuntimeCall;
1732     bool IsWorthIt = false;
1733     while ((CurrentI = CurrentI->getNextNode())) {
1734 
1735       // TODO: Once we detect the regions to be offloaded we should use the
1736       //  alias analysis manager to check if CurrentI may modify one of
1737       //  the offloaded regions.
1738       if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
1739         if (IsWorthIt)
1740           return CurrentI;
1741 
1742         return nullptr;
1743       }
1744 
1745       // FIXME: For now if we move it over anything without side effect
1746       //  is worth it.
1747       IsWorthIt = true;
1748     }
1749 
1750     // Return end of BasicBlock.
1751     return RuntimeCall.getParent()->getTerminator();
1752   }
1753 
1754   /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
1755   bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
1756                                Instruction &WaitMovementPoint) {
1757     // Create stack allocated handle (__tgt_async_info) at the beginning of the
1758     // function. Used for storing information of the async transfer, allowing to
1759     // wait on it later.
1760     auto &IRBuilder = OMPInfoCache.OMPBuilder;
1761     auto *F = RuntimeCall.getCaller();
1762     Instruction *FirstInst = &(F->getEntryBlock().front());
1763     AllocaInst *Handle = new AllocaInst(
1764         IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);
1765 
1766     // Add "issue" runtime call declaration:
1767     // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
1768     //   i8**, i8**, i64*, i64*)
1769     FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
1770         M, OMPRTL___tgt_target_data_begin_mapper_issue);
1771 
1772     // Change RuntimeCall call site for its asynchronous version.
1773     SmallVector<Value *, 16> Args;
1774     for (auto &Arg : RuntimeCall.args())
1775       Args.push_back(Arg.get());
1776     Args.push_back(Handle);
1777 
1778     CallInst *IssueCallsite =
1779         CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
1780     OMPInfoCache.setCallingConvention(IssueDecl, IssueCallsite);
1781     RuntimeCall.eraseFromParent();
1782 
1783     // Add "wait" runtime call declaration:
1784     // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
1785     FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
1786         M, OMPRTL___tgt_target_data_begin_mapper_wait);
1787 
1788     Value *WaitParams[2] = {
1789         IssueCallsite->getArgOperand(
1790             OffloadArray::DeviceIDArgNum), // device_id.
1791         Handle                             // handle to wait on.
1792     };
1793     CallInst *WaitCallsite = CallInst::Create(
1794         WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
1795     OMPInfoCache.setCallingConvention(WaitDecl, WaitCallsite);
1796 
1797     return true;
1798   }
1799 
1800   static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
1801                                     bool GlobalOnly, bool &SingleChoice) {
1802     if (CurrentIdent == NextIdent)
1803       return CurrentIdent;
1804 
1805     // TODO: Figure out how to actually combine multiple debug locations. For
1806     //       now we just keep an existing one if there is a single choice.
1807     if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
1808       SingleChoice = !CurrentIdent;
1809       return NextIdent;
1810     }
1811     return nullptr;
1812   }
1813 
1814   /// Return an `struct ident_t*` value that represents the ones used in the
1815   /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
1816   /// return a local `struct ident_t*`. For now, if we cannot find a suitable
1817   /// return value we create one from scratch. We also do not yet combine
1818   /// information, e.g., the source locations, see combinedIdentStruct.
1819   Value *
1820   getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
1821                                  Function &F, bool GlobalOnly) {
1822     bool SingleChoice = true;
1823     Value *Ident = nullptr;
1824     auto CombineIdentStruct = [&](Use &U, Function &Caller) {
1825       CallInst *CI = getCallIfRegularCall(U, &RFI);
1826       if (!CI || &F != &Caller)
1827         return false;
1828       Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
1829                                   /* GlobalOnly */ true, SingleChoice);
1830       return false;
1831     };
1832     RFI.foreachUse(SCC, CombineIdentStruct);
1833 
1834     if (!Ident || !SingleChoice) {
1835       // The IRBuilder uses the insertion block to get to the module, this is
1836       // unfortunate but we work around it for now.
1837       if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
1838         OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
1839             &F.getEntryBlock(), F.getEntryBlock().begin()));
1840       // Create a fallback location if non was found.
1841       // TODO: Use the debug locations of the calls instead.
1842       uint32_t SrcLocStrSize;
1843       Constant *Loc =
1844           OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
1845       Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc, SrcLocStrSize);
1846     }
1847     return Ident;
1848   }
1849 
1850   /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
1851   /// \p ReplVal if given.
1852   bool deduplicateRuntimeCalls(Function &F,
1853                                OMPInformationCache::RuntimeFunctionInfo &RFI,
1854                                Value *ReplVal = nullptr) {
1855     auto *UV = RFI.getUseVector(F);
1856     if (!UV || UV->size() + (ReplVal != nullptr) < 2)
1857       return false;
1858 
1859     LLVM_DEBUG(
1860         dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
1861                << (ReplVal ? " with an existing value\n" : "\n") << "\n");
1862 
1863     assert((!ReplVal || (isa<Argument>(ReplVal) &&
1864                          cast<Argument>(ReplVal)->getParent() == &F)) &&
1865            "Unexpected replacement value!");
1866 
1867     // TODO: Use dominance to find a good position instead.
1868     auto CanBeMoved = [this](CallBase &CB) {
1869       unsigned NumArgs = CB.arg_size();
1870       if (NumArgs == 0)
1871         return true;
1872       if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
1873         return false;
1874       for (unsigned U = 1; U < NumArgs; ++U)
1875         if (isa<Instruction>(CB.getArgOperand(U)))
1876           return false;
1877       return true;
1878     };
1879 
1880     if (!ReplVal) {
1881       for (Use *U : *UV)
1882         if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
1883           if (!CanBeMoved(*CI))
1884             continue;
1885 
1886           // If the function is a kernel, dedup will move
1887           // the runtime call right after the kernel init callsite. Otherwise,
1888           // it will move it to the beginning of the caller function.
1889           if (isKernel(F)) {
1890             auto &KernelInitRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
1891             auto *KernelInitUV = KernelInitRFI.getUseVector(F);
1892 
1893             if (KernelInitUV->empty())
1894               continue;
1895 
1896             assert(KernelInitUV->size() == 1 &&
1897                    "Expected a single __kmpc_target_init in kernel\n");
1898 
1899             CallInst *KernelInitCI =
1900                 getCallIfRegularCall(*KernelInitUV->front(), &KernelInitRFI);
1901             assert(KernelInitCI &&
1902                    "Expected a call to __kmpc_target_init in kernel\n");
1903 
1904             CI->moveAfter(KernelInitCI);
1905           } else
1906             CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
1907           ReplVal = CI;
1908           break;
1909         }
1910       if (!ReplVal)
1911         return false;
1912     }
1913 
1914     // If we use a call as a replacement value we need to make sure the ident is
1915     // valid at the new location. For now we just pick a global one, either
1916     // existing and used by one of the calls, or created from scratch.
1917     if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
1918       if (!CI->arg_empty() &&
1919           CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
1920         Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
1921                                                       /* GlobalOnly */ true);
1922         CI->setArgOperand(0, Ident);
1923       }
1924     }
1925 
1926     bool Changed = false;
1927     auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1928       CallInst *CI = getCallIfRegularCall(U, &RFI);
1929       if (!CI || CI == ReplVal || &F != &Caller)
1930         return false;
1931       assert(CI->getCaller() == &F && "Unexpected call!");
1932 
1933       auto Remark = [&](OptimizationRemark OR) {
1934         return OR << "OpenMP runtime call "
1935                   << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated.";
1936       };
1937       if (CI->getDebugLoc())
1938         emitRemark<OptimizationRemark>(CI, "OMP170", Remark);
1939       else
1940         emitRemark<OptimizationRemark>(&F, "OMP170", Remark);
1941 
1942       CGUpdater.removeCallSite(*CI);
1943       CI->replaceAllUsesWith(ReplVal);
1944       CI->eraseFromParent();
1945       ++NumOpenMPRuntimeCallsDeduplicated;
1946       Changed = true;
1947       return true;
1948     };
1949     RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1950 
1951     return Changed;
1952   }
1953 
1954   /// Collect arguments that represent the global thread id in \p GTIdArgs.
1955   void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1956     // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1957     //       initialization. We could define an AbstractAttribute instead and
1958     //       run the Attributor here once it can be run as an SCC pass.
1959 
1960     // Helper to check the argument \p ArgNo at all call sites of \p F for
1961     // a GTId.
1962     auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1963       if (!F.hasLocalLinkage())
1964         return false;
1965       for (Use &U : F.uses()) {
1966         if (CallInst *CI = getCallIfRegularCall(U)) {
1967           Value *ArgOp = CI->getArgOperand(ArgNo);
1968           if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1969               getCallIfRegularCall(
1970                   *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1971             continue;
1972         }
1973         return false;
1974       }
1975       return true;
1976     };
1977 
1978     // Helper to identify uses of a GTId as GTId arguments.
1979     auto AddUserArgs = [&](Value &GTId) {
1980       for (Use &U : GTId.uses())
1981         if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1982           if (CI->isArgOperand(&U))
1983             if (Function *Callee = CI->getCalledFunction())
1984               if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1985                 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1986     };
1987 
1988     // The argument users of __kmpc_global_thread_num calls are GTIds.
1989     OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1990         OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1991 
1992     GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1993       if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1994         AddUserArgs(*CI);
1995       return false;
1996     });
1997 
1998     // Transitively search for more arguments by looking at the users of the
1999     // ones we know already. During the search the GTIdArgs vector is extended
2000     // so we cannot cache the size nor can we use a range based for.
2001     for (unsigned U = 0; U < GTIdArgs.size(); ++U)
2002       AddUserArgs(*GTIdArgs[U]);
2003   }
2004 
2005   /// Kernel (=GPU) optimizations and utility functions
2006   ///
2007   ///{{
2008 
2009   /// Check if \p F is a kernel, hence entry point for target offloading.
2010   bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }
2011 
2012   /// Cache to remember the unique kernel for a function.
2013   DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;
2014 
2015   /// Find the unique kernel that will execute \p F, if any.
2016   Kernel getUniqueKernelFor(Function &F);
2017 
2018   /// Find the unique kernel that will execute \p I, if any.
2019   Kernel getUniqueKernelFor(Instruction &I) {
2020     return getUniqueKernelFor(*I.getFunction());
2021   }
2022 
2023   /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in
2024   /// the cases we can avoid taking the address of a function.
2025   bool rewriteDeviceCodeStateMachine();
2026 
2027   ///
2028   ///}}
2029 
2030   /// Emit a remark generically
2031   ///
2032   /// This template function can be used to generically emit a remark. The
2033   /// RemarkKind should be one of the following:
2034   ///   - OptimizationRemark to indicate a successful optimization attempt
2035   ///   - OptimizationRemarkMissed to report a failed optimization attempt
2036   ///   - OptimizationRemarkAnalysis to provide additional information about an
2037   ///     optimization attempt
2038   ///
2039   /// The remark is built using a callback function provided by the caller that
2040   /// takes a RemarkKind as input and returns a RemarkKind.
2041   template <typename RemarkKind, typename RemarkCallBack>
2042   void emitRemark(Instruction *I, StringRef RemarkName,
2043                   RemarkCallBack &&RemarkCB) const {
2044     Function *F = I->getParent()->getParent();
2045     auto &ORE = OREGetter(F);
2046 
2047     if (RemarkName.startswith("OMP"))
2048       ORE.emit([&]() {
2049         return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I))
2050                << " [" << RemarkName << "]";
2051       });
2052     else
2053       ORE.emit(
2054           [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, I)); });
2055   }
2056 
2057   /// Emit a remark on a function.
2058   template <typename RemarkKind, typename RemarkCallBack>
2059   void emitRemark(Function *F, StringRef RemarkName,
2060                   RemarkCallBack &&RemarkCB) const {
2061     auto &ORE = OREGetter(F);
2062 
2063     if (RemarkName.startswith("OMP"))
2064       ORE.emit([&]() {
2065         return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F))
2066                << " [" << RemarkName << "]";
2067       });
2068     else
2069       ORE.emit(
2070           [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, F)); });
2071   }
2072 
2073   /// RAII struct to temporarily change an RTL function's linkage to external.
2074   /// This prevents it from being mistakenly removed by other optimizations.
2075   struct ExternalizationRAII {
2076     ExternalizationRAII(OMPInformationCache &OMPInfoCache,
2077                         RuntimeFunction RFKind)
2078         : Declaration(OMPInfoCache.RFIs[RFKind].Declaration) {
2079       if (!Declaration)
2080         return;
2081 
2082       LinkageType = Declaration->getLinkage();
2083       Declaration->setLinkage(GlobalValue::ExternalLinkage);
2084     }
2085 
2086     ~ExternalizationRAII() {
2087       if (!Declaration)
2088         return;
2089 
2090       Declaration->setLinkage(LinkageType);
2091     }
2092 
2093     Function *Declaration;
2094     GlobalValue::LinkageTypes LinkageType;
2095   };
2096 
2097   /// The underlying module.
2098   Module &M;
2099 
2100   /// The SCC we are operating on.
2101   SmallVectorImpl<Function *> &SCC;
2102 
2103   /// Callback to update the call graph, the first argument is a removed call,
2104   /// the second an optional replacement call.
2105   CallGraphUpdater &CGUpdater;
2106 
2107   /// Callback to get an OptimizationRemarkEmitter from a Function *
2108   OptimizationRemarkGetter OREGetter;
2109 
2110   /// OpenMP-specific information cache. Also Used for Attributor runs.
2111   OMPInformationCache &OMPInfoCache;
2112 
2113   /// Attributor instance.
2114   Attributor &A;
2115 
2116   /// Helper function to run Attributor on SCC.
2117   bool runAttributor(bool IsModulePass) {
2118     if (SCC.empty())
2119       return false;
2120 
2121     // Temporarily make these function have external linkage so the Attributor
2122     // doesn't remove them when we try to look them up later.
2123     ExternalizationRAII Parallel(OMPInfoCache, OMPRTL___kmpc_kernel_parallel);
2124     ExternalizationRAII EndParallel(OMPInfoCache,
2125                                     OMPRTL___kmpc_kernel_end_parallel);
2126     ExternalizationRAII BarrierSPMD(OMPInfoCache,
2127                                     OMPRTL___kmpc_barrier_simple_spmd);
2128     ExternalizationRAII BarrierGeneric(OMPInfoCache,
2129                                        OMPRTL___kmpc_barrier_simple_generic);
2130     ExternalizationRAII ThreadId(OMPInfoCache,
2131                                  OMPRTL___kmpc_get_hardware_thread_id_in_block);
2132     ExternalizationRAII NumThreads(
2133         OMPInfoCache, OMPRTL___kmpc_get_hardware_num_threads_in_block);
2134     ExternalizationRAII WarpSize(OMPInfoCache, OMPRTL___kmpc_get_warp_size);
2135 
2136     registerAAs(IsModulePass);
2137 
2138     ChangeStatus Changed = A.run();
2139 
2140     LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
2141                       << " functions, result: " << Changed << ".\n");
2142 
2143     return Changed == ChangeStatus::CHANGED;
2144   }
2145 
2146   void registerFoldRuntimeCall(RuntimeFunction RF);
2147 
2148   /// Populate the Attributor with abstract attribute opportunities in the
2149   /// function.
2150   void registerAAs(bool IsModulePass);
2151 };
2152 
2153 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
2154   if (!OMPInfoCache.ModuleSlice.count(&F))
2155     return nullptr;
2156 
2157   // Use a scope to keep the lifetime of the CachedKernel short.
2158   {
2159     Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
2160     if (CachedKernel)
2161       return *CachedKernel;
2162 
2163     // TODO: We should use an AA to create an (optimistic and callback
2164     //       call-aware) call graph. For now we stick to simple patterns that
2165     //       are less powerful, basically the worst fixpoint.
2166     if (isKernel(F)) {
2167       CachedKernel = Kernel(&F);
2168       return *CachedKernel;
2169     }
2170 
2171     CachedKernel = nullptr;
2172     if (!F.hasLocalLinkage()) {
2173 
2174       // See https://openmp.llvm.org/remarks/OptimizationRemarks.html
2175       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2176         return ORA << "Potentially unknown OpenMP target region caller.";
2177       };
2178       emitRemark<OptimizationRemarkAnalysis>(&F, "OMP100", Remark);
2179 
2180       return nullptr;
2181     }
2182   }
2183 
2184   auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
2185     if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2186       // Allow use in equality comparisons.
2187       if (Cmp->isEquality())
2188         return getUniqueKernelFor(*Cmp);
2189       return nullptr;
2190     }
2191     if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
2192       // Allow direct calls.
2193       if (CB->isCallee(&U))
2194         return getUniqueKernelFor(*CB);
2195 
2196       OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI =
2197           OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
2198       // Allow the use in __kmpc_parallel_51 calls.
2199       if (OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI))
2200         return getUniqueKernelFor(*CB);
2201       return nullptr;
2202     }
2203     // Disallow every other use.
2204     return nullptr;
2205   };
2206 
2207   // TODO: In the future we want to track more than just a unique kernel.
2208   SmallPtrSet<Kernel, 2> PotentialKernels;
2209   OMPInformationCache::foreachUse(F, [&](const Use &U) {
2210     PotentialKernels.insert(GetUniqueKernelForUse(U));
2211   });
2212 
2213   Kernel K = nullptr;
2214   if (PotentialKernels.size() == 1)
2215     K = *PotentialKernels.begin();
2216 
2217   // Cache the result.
2218   UniqueKernelMap[&F] = K;
2219 
2220   return K;
2221 }
2222 
2223 bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
2224   OMPInformationCache::RuntimeFunctionInfo &KernelParallelRFI =
2225       OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
2226 
2227   bool Changed = false;
2228   if (!KernelParallelRFI)
2229     return Changed;
2230 
2231   // If we have disabled state machine changes, exit
2232   if (DisableOpenMPOptStateMachineRewrite)
2233     return Changed;
2234 
2235   for (Function *F : SCC) {
2236 
2237     // Check if the function is a use in a __kmpc_parallel_51 call at
2238     // all.
2239     bool UnknownUse = false;
2240     bool KernelParallelUse = false;
2241     unsigned NumDirectCalls = 0;
2242 
2243     SmallVector<Use *, 2> ToBeReplacedStateMachineUses;
2244     OMPInformationCache::foreachUse(*F, [&](Use &U) {
2245       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2246         if (CB->isCallee(&U)) {
2247           ++NumDirectCalls;
2248           return;
2249         }
2250 
2251       if (isa<ICmpInst>(U.getUser())) {
2252         ToBeReplacedStateMachineUses.push_back(&U);
2253         return;
2254       }
2255 
2256       // Find wrapper functions that represent parallel kernels.
2257       CallInst *CI =
2258           OpenMPOpt::getCallIfRegularCall(*U.getUser(), &KernelParallelRFI);
2259       const unsigned int WrapperFunctionArgNo = 6;
2260       if (!KernelParallelUse && CI &&
2261           CI->getArgOperandNo(&U) == WrapperFunctionArgNo) {
2262         KernelParallelUse = true;
2263         ToBeReplacedStateMachineUses.push_back(&U);
2264         return;
2265       }
2266       UnknownUse = true;
2267     });
2268 
2269     // Do not emit a remark if we haven't seen a __kmpc_parallel_51
2270     // use.
2271     if (!KernelParallelUse)
2272       continue;
2273 
2274     // If this ever hits, we should investigate.
2275     // TODO: Checking the number of uses is not a necessary restriction and
2276     // should be lifted.
2277     if (UnknownUse || NumDirectCalls != 1 ||
2278         ToBeReplacedStateMachineUses.size() > 2) {
2279       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2280         return ORA << "Parallel region is used in "
2281                    << (UnknownUse ? "unknown" : "unexpected")
2282                    << " ways. Will not attempt to rewrite the state machine.";
2283       };
2284       emitRemark<OptimizationRemarkAnalysis>(F, "OMP101", Remark);
2285       continue;
2286     }
2287 
2288     // Even if we have __kmpc_parallel_51 calls, we (for now) give
2289     // up if the function is not called from a unique kernel.
2290     Kernel K = getUniqueKernelFor(*F);
2291     if (!K) {
2292       auto Remark = [&](OptimizationRemarkAnalysis ORA) {
2293         return ORA << "Parallel region is not called from a unique kernel. "
2294                       "Will not attempt to rewrite the state machine.";
2295       };
2296       emitRemark<OptimizationRemarkAnalysis>(F, "OMP102", Remark);
2297       continue;
2298     }
2299 
2300     // We now know F is a parallel body function called only from the kernel K.
2301     // We also identified the state machine uses in which we replace the
2302     // function pointer by a new global symbol for identification purposes. This
2303     // ensures only direct calls to the function are left.
2304 
2305     Module &M = *F->getParent();
2306     Type *Int8Ty = Type::getInt8Ty(M.getContext());
2307 
2308     auto *ID = new GlobalVariable(
2309         M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
2310         UndefValue::get(Int8Ty), F->getName() + ".ID");
2311 
2312     for (Use *U : ToBeReplacedStateMachineUses)
2313       U->set(ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2314           ID, U->get()->getType()));
2315 
2316     ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
2317 
2318     Changed = true;
2319   }
2320 
2321   return Changed;
2322 }
2323 
2324 /// Abstract Attribute for tracking ICV values.
2325 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
2326   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2327   AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2328 
2329   void initialize(Attributor &A) override {
2330     Function *F = getAnchorScope();
2331     if (!F || !A.isFunctionIPOAmendable(*F))
2332       indicatePessimisticFixpoint();
2333   }
2334 
2335   /// Returns true if value is assumed to be tracked.
2336   bool isAssumedTracked() const { return getAssumed(); }
2337 
2338   /// Returns true if value is known to be tracked.
2339   bool isKnownTracked() const { return getAssumed(); }
2340 
2341   /// Create an abstract attribute biew for the position \p IRP.
2342   static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
2343 
2344   /// Return the value with which \p I can be replaced for specific \p ICV.
2345   virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
2346                                                 const Instruction *I,
2347                                                 Attributor &A) const {
2348     return None;
2349   }
2350 
2351   /// Return an assumed unique ICV value if a single candidate is found. If
2352   /// there cannot be one, return a nullptr. If it is not clear yet, return the
2353   /// Optional::NoneType.
2354   virtual Optional<Value *>
2355   getUniqueReplacementValue(InternalControlVar ICV) const = 0;
2356 
2357   // Currently only nthreads is being tracked.
2358   // this array will only grow with time.
2359   InternalControlVar TrackableICVs[1] = {ICV_nthreads};
2360 
2361   /// See AbstractAttribute::getName()
2362   const std::string getName() const override { return "AAICVTracker"; }
2363 
2364   /// See AbstractAttribute::getIdAddr()
2365   const char *getIdAddr() const override { return &ID; }
2366 
2367   /// This function should return true if the type of the \p AA is AAICVTracker
2368   static bool classof(const AbstractAttribute *AA) {
2369     return (AA->getIdAddr() == &ID);
2370   }
2371 
2372   static const char ID;
2373 };
2374 
2375 struct AAICVTrackerFunction : public AAICVTracker {
2376   AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
2377       : AAICVTracker(IRP, A) {}
2378 
2379   // FIXME: come up with better string.
2380   const std::string getAsStr() const override { return "ICVTrackerFunction"; }
2381 
2382   // FIXME: come up with some stats.
2383   void trackStatistics() const override {}
2384 
2385   /// We don't manifest anything for this AA.
2386   ChangeStatus manifest(Attributor &A) override {
2387     return ChangeStatus::UNCHANGED;
2388   }
2389 
2390   // Map of ICV to their values at specific program point.
2391   EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
2392                   InternalControlVar::ICV___last>
2393       ICVReplacementValuesMap;
2394 
2395   ChangeStatus updateImpl(Attributor &A) override {
2396     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2397 
2398     Function *F = getAnchorScope();
2399 
2400     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2401 
2402     for (InternalControlVar ICV : TrackableICVs) {
2403       auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2404 
2405       auto &ValuesMap = ICVReplacementValuesMap[ICV];
2406       auto TrackValues = [&](Use &U, Function &) {
2407         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
2408         if (!CI)
2409           return false;
2410 
2411         // FIXME: handle setters with more that 1 arguments.
2412         /// Track new value.
2413         if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
2414           HasChanged = ChangeStatus::CHANGED;
2415 
2416         return false;
2417       };
2418 
2419       auto CallCheck = [&](Instruction &I) {
2420         Optional<Value *> ReplVal = getValueForCall(A, I, ICV);
2421         if (ReplVal.hasValue() &&
2422             ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
2423           HasChanged = ChangeStatus::CHANGED;
2424 
2425         return true;
2426       };
2427 
2428       // Track all changes of an ICV.
2429       SetterRFI.foreachUse(TrackValues, F);
2430 
2431       bool UsedAssumedInformation = false;
2432       A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
2433                                 UsedAssumedInformation,
2434                                 /* CheckBBLivenessOnly */ true);
2435 
2436       /// TODO: Figure out a way to avoid adding entry in
2437       /// ICVReplacementValuesMap
2438       Instruction *Entry = &F->getEntryBlock().front();
2439       if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
2440         ValuesMap.insert(std::make_pair(Entry, nullptr));
2441     }
2442 
2443     return HasChanged;
2444   }
2445 
2446   /// Helper to check if \p I is a call and get the value for it if it is
2447   /// unique.
2448   Optional<Value *> getValueForCall(Attributor &A, const Instruction &I,
2449                                     InternalControlVar &ICV) const {
2450 
2451     const auto *CB = dyn_cast<CallBase>(&I);
2452     if (!CB || CB->hasFnAttr("no_openmp") ||
2453         CB->hasFnAttr("no_openmp_routines"))
2454       return None;
2455 
2456     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2457     auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
2458     auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
2459     Function *CalledFunction = CB->getCalledFunction();
2460 
2461     // Indirect call, assume ICV changes.
2462     if (CalledFunction == nullptr)
2463       return nullptr;
2464     if (CalledFunction == GetterRFI.Declaration)
2465       return None;
2466     if (CalledFunction == SetterRFI.Declaration) {
2467       if (ICVReplacementValuesMap[ICV].count(&I))
2468         return ICVReplacementValuesMap[ICV].lookup(&I);
2469 
2470       return nullptr;
2471     }
2472 
2473     // Since we don't know, assume it changes the ICV.
2474     if (CalledFunction->isDeclaration())
2475       return nullptr;
2476 
2477     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2478         *this, IRPosition::callsite_returned(*CB), DepClassTy::REQUIRED);
2479 
2480     if (ICVTrackingAA.isAssumedTracked()) {
2481       Optional<Value *> URV = ICVTrackingAA.getUniqueReplacementValue(ICV);
2482       if (!URV || (*URV && AA::isValidAtPosition(**URV, I, OMPInfoCache)))
2483         return URV;
2484     }
2485 
2486     // If we don't know, assume it changes.
2487     return nullptr;
2488   }
2489 
2490   // We don't check unique value for a function, so return None.
2491   Optional<Value *>
2492   getUniqueReplacementValue(InternalControlVar ICV) const override {
2493     return None;
2494   }
2495 
2496   /// Return the value with which \p I can be replaced for specific \p ICV.
2497   Optional<Value *> getReplacementValue(InternalControlVar ICV,
2498                                         const Instruction *I,
2499                                         Attributor &A) const override {
2500     const auto &ValuesMap = ICVReplacementValuesMap[ICV];
2501     if (ValuesMap.count(I))
2502       return ValuesMap.lookup(I);
2503 
2504     SmallVector<const Instruction *, 16> Worklist;
2505     SmallPtrSet<const Instruction *, 16> Visited;
2506     Worklist.push_back(I);
2507 
2508     Optional<Value *> ReplVal;
2509 
2510     while (!Worklist.empty()) {
2511       const Instruction *CurrInst = Worklist.pop_back_val();
2512       if (!Visited.insert(CurrInst).second)
2513         continue;
2514 
2515       const BasicBlock *CurrBB = CurrInst->getParent();
2516 
2517       // Go up and look for all potential setters/calls that might change the
2518       // ICV.
2519       while ((CurrInst = CurrInst->getPrevNode())) {
2520         if (ValuesMap.count(CurrInst)) {
2521           Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
2522           // Unknown value, track new.
2523           if (!ReplVal.hasValue()) {
2524             ReplVal = NewReplVal;
2525             break;
2526           }
2527 
2528           // If we found a new value, we can't know the icv value anymore.
2529           if (NewReplVal.hasValue())
2530             if (ReplVal != NewReplVal)
2531               return nullptr;
2532 
2533           break;
2534         }
2535 
2536         Optional<Value *> NewReplVal = getValueForCall(A, *CurrInst, ICV);
2537         if (!NewReplVal.hasValue())
2538           continue;
2539 
2540         // Unknown value, track new.
2541         if (!ReplVal.hasValue()) {
2542           ReplVal = NewReplVal;
2543           break;
2544         }
2545 
2546         // if (NewReplVal.hasValue())
2547         // We found a new value, we can't know the icv value anymore.
2548         if (ReplVal != NewReplVal)
2549           return nullptr;
2550       }
2551 
2552       // If we are in the same BB and we have a value, we are done.
2553       if (CurrBB == I->getParent() && ReplVal.hasValue())
2554         return ReplVal;
2555 
2556       // Go through all predecessors and add terminators for analysis.
2557       for (const BasicBlock *Pred : predecessors(CurrBB))
2558         if (const Instruction *Terminator = Pred->getTerminator())
2559           Worklist.push_back(Terminator);
2560     }
2561 
2562     return ReplVal;
2563   }
2564 };
2565 
2566 struct AAICVTrackerFunctionReturned : AAICVTracker {
2567   AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
2568       : AAICVTracker(IRP, A) {}
2569 
2570   // FIXME: come up with better string.
2571   const std::string getAsStr() const override {
2572     return "ICVTrackerFunctionReturned";
2573   }
2574 
2575   // FIXME: come up with some stats.
2576   void trackStatistics() const override {}
2577 
2578   /// We don't manifest anything for this AA.
2579   ChangeStatus manifest(Attributor &A) override {
2580     return ChangeStatus::UNCHANGED;
2581   }
2582 
2583   // Map of ICV to their values at specific program point.
2584   EnumeratedArray<Optional<Value *>, InternalControlVar,
2585                   InternalControlVar::ICV___last>
2586       ICVReplacementValuesMap;
2587 
2588   /// Return the value with which \p I can be replaced for specific \p ICV.
2589   Optional<Value *>
2590   getUniqueReplacementValue(InternalControlVar ICV) const override {
2591     return ICVReplacementValuesMap[ICV];
2592   }
2593 
2594   ChangeStatus updateImpl(Attributor &A) override {
2595     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2596     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2597         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
2598 
2599     if (!ICVTrackingAA.isAssumedTracked())
2600       return indicatePessimisticFixpoint();
2601 
2602     for (InternalControlVar ICV : TrackableICVs) {
2603       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
2604       Optional<Value *> UniqueICVValue;
2605 
2606       auto CheckReturnInst = [&](Instruction &I) {
2607         Optional<Value *> NewReplVal =
2608             ICVTrackingAA.getReplacementValue(ICV, &I, A);
2609 
2610         // If we found a second ICV value there is no unique returned value.
2611         if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal)
2612           return false;
2613 
2614         UniqueICVValue = NewReplVal;
2615 
2616         return true;
2617       };
2618 
2619       bool UsedAssumedInformation = false;
2620       if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret},
2621                                      UsedAssumedInformation,
2622                                      /* CheckBBLivenessOnly */ true))
2623         UniqueICVValue = nullptr;
2624 
2625       if (UniqueICVValue == ReplVal)
2626         continue;
2627 
2628       ReplVal = UniqueICVValue;
2629       Changed = ChangeStatus::CHANGED;
2630     }
2631 
2632     return Changed;
2633   }
2634 };
2635 
2636 struct AAICVTrackerCallSite : AAICVTracker {
2637   AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A)
2638       : AAICVTracker(IRP, A) {}
2639 
2640   void initialize(Attributor &A) override {
2641     Function *F = getAnchorScope();
2642     if (!F || !A.isFunctionIPOAmendable(*F))
2643       indicatePessimisticFixpoint();
2644 
2645     // We only initialize this AA for getters, so we need to know which ICV it
2646     // gets.
2647     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2648     for (InternalControlVar ICV : TrackableICVs) {
2649       auto ICVInfo = OMPInfoCache.ICVs[ICV];
2650       auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter];
2651       if (Getter.Declaration == getAssociatedFunction()) {
2652         AssociatedICV = ICVInfo.Kind;
2653         return;
2654       }
2655     }
2656 
2657     /// Unknown ICV.
2658     indicatePessimisticFixpoint();
2659   }
2660 
2661   ChangeStatus manifest(Attributor &A) override {
2662     if (!ReplVal.hasValue() || !ReplVal.getValue())
2663       return ChangeStatus::UNCHANGED;
2664 
2665     A.changeValueAfterManifest(*getCtxI(), **ReplVal);
2666     A.deleteAfterManifest(*getCtxI());
2667 
2668     return ChangeStatus::CHANGED;
2669   }
2670 
2671   // FIXME: come up with better string.
2672   const std::string getAsStr() const override { return "ICVTrackerCallSite"; }
2673 
2674   // FIXME: come up with some stats.
2675   void trackStatistics() const override {}
2676 
2677   InternalControlVar AssociatedICV;
2678   Optional<Value *> ReplVal;
2679 
2680   ChangeStatus updateImpl(Attributor &A) override {
2681     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2682         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
2683 
2684     // We don't have any information, so we assume it changes the ICV.
2685     if (!ICVTrackingAA.isAssumedTracked())
2686       return indicatePessimisticFixpoint();
2687 
2688     Optional<Value *> NewReplVal =
2689         ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A);
2690 
2691     if (ReplVal == NewReplVal)
2692       return ChangeStatus::UNCHANGED;
2693 
2694     ReplVal = NewReplVal;
2695     return ChangeStatus::CHANGED;
2696   }
2697 
2698   // Return the value with which associated value can be replaced for specific
2699   // \p ICV.
2700   Optional<Value *>
2701   getUniqueReplacementValue(InternalControlVar ICV) const override {
2702     return ReplVal;
2703   }
2704 };
2705 
2706 struct AAICVTrackerCallSiteReturned : AAICVTracker {
2707   AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A)
2708       : AAICVTracker(IRP, A) {}
2709 
2710   // FIXME: come up with better string.
2711   const std::string getAsStr() const override {
2712     return "ICVTrackerCallSiteReturned";
2713   }
2714 
2715   // FIXME: come up with some stats.
2716   void trackStatistics() const override {}
2717 
2718   /// We don't manifest anything for this AA.
2719   ChangeStatus manifest(Attributor &A) override {
2720     return ChangeStatus::UNCHANGED;
2721   }
2722 
2723   // Map of ICV to their values at specific program point.
2724   EnumeratedArray<Optional<Value *>, InternalControlVar,
2725                   InternalControlVar::ICV___last>
2726       ICVReplacementValuesMap;
2727 
2728   /// Return the value with which associated value can be replaced for specific
2729   /// \p ICV.
2730   Optional<Value *>
2731   getUniqueReplacementValue(InternalControlVar ICV) const override {
2732     return ICVReplacementValuesMap[ICV];
2733   }
2734 
2735   ChangeStatus updateImpl(Attributor &A) override {
2736     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2737     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
2738         *this, IRPosition::returned(*getAssociatedFunction()),
2739         DepClassTy::REQUIRED);
2740 
2741     // We don't have any information, so we assume it changes the ICV.
2742     if (!ICVTrackingAA.isAssumedTracked())
2743       return indicatePessimisticFixpoint();
2744 
2745     for (InternalControlVar ICV : TrackableICVs) {
2746       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
2747       Optional<Value *> NewReplVal =
2748           ICVTrackingAA.getUniqueReplacementValue(ICV);
2749 
2750       if (ReplVal == NewReplVal)
2751         continue;
2752 
2753       ReplVal = NewReplVal;
2754       Changed = ChangeStatus::CHANGED;
2755     }
2756     return Changed;
2757   }
2758 };
2759 
2760 struct AAExecutionDomainFunction : public AAExecutionDomain {
2761   AAExecutionDomainFunction(const IRPosition &IRP, Attributor &A)
2762       : AAExecutionDomain(IRP, A) {}
2763 
2764   const std::string getAsStr() const override {
2765     return "[AAExecutionDomain] " + std::to_string(SingleThreadedBBs.size()) +
2766            "/" + std::to_string(NumBBs) + " BBs thread 0 only.";
2767   }
2768 
2769   /// See AbstractAttribute::trackStatistics().
2770   void trackStatistics() const override {}
2771 
2772   void initialize(Attributor &A) override {
2773     Function *F = getAnchorScope();
2774     for (const auto &BB : *F)
2775       SingleThreadedBBs.insert(&BB);
2776     NumBBs = SingleThreadedBBs.size();
2777   }
2778 
2779   ChangeStatus manifest(Attributor &A) override {
2780     LLVM_DEBUG({
2781       for (const BasicBlock *BB : SingleThreadedBBs)
2782         dbgs() << TAG << " Basic block @" << getAnchorScope()->getName() << " "
2783                << BB->getName() << " is executed by a single thread.\n";
2784     });
2785     return ChangeStatus::UNCHANGED;
2786   }
2787 
2788   ChangeStatus updateImpl(Attributor &A) override;
2789 
2790   /// Check if an instruction is executed by a single thread.
2791   bool isExecutedByInitialThreadOnly(const Instruction &I) const override {
2792     return isExecutedByInitialThreadOnly(*I.getParent());
2793   }
2794 
2795   bool isExecutedByInitialThreadOnly(const BasicBlock &BB) const override {
2796     return isValidState() && SingleThreadedBBs.contains(&BB);
2797   }
2798 
2799   /// Set of basic blocks that are executed by a single thread.
2800   SmallSetVector<const BasicBlock *, 16> SingleThreadedBBs;
2801 
2802   /// Total number of basic blocks in this function.
2803   long unsigned NumBBs;
2804 };
2805 
2806 ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
2807   Function *F = getAnchorScope();
2808   ReversePostOrderTraversal<Function *> RPOT(F);
2809   auto NumSingleThreadedBBs = SingleThreadedBBs.size();
2810 
2811   bool AllCallSitesKnown;
2812   auto PredForCallSite = [&](AbstractCallSite ACS) {
2813     const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>(
2814         *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2815         DepClassTy::REQUIRED);
2816     return ACS.isDirectCall() &&
2817            ExecutionDomainAA.isExecutedByInitialThreadOnly(
2818                *ACS.getInstruction());
2819   };
2820 
2821   if (!A.checkForAllCallSites(PredForCallSite, *this,
2822                               /* RequiresAllCallSites */ true,
2823                               AllCallSitesKnown))
2824     SingleThreadedBBs.remove(&F->getEntryBlock());
2825 
2826   auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2827   auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
2828 
2829   // Check if the edge into the successor block contains a condition that only
2830   // lets the main thread execute it.
2831   auto IsInitialThreadOnly = [&](BranchInst *Edge, BasicBlock *SuccessorBB) {
2832     if (!Edge || !Edge->isConditional())
2833       return false;
2834     if (Edge->getSuccessor(0) != SuccessorBB)
2835       return false;
2836 
2837     auto *Cmp = dyn_cast<CmpInst>(Edge->getCondition());
2838     if (!Cmp || !Cmp->isTrueWhenEqual() || !Cmp->isEquality())
2839       return false;
2840 
2841     ConstantInt *C = dyn_cast<ConstantInt>(Cmp->getOperand(1));
2842     if (!C)
2843       return false;
2844 
2845     // Match: -1 == __kmpc_target_init (for non-SPMD kernels only!)
2846     if (C->isAllOnesValue()) {
2847       auto *CB = dyn_cast<CallBase>(Cmp->getOperand(0));
2848       CB = CB ? OpenMPOpt::getCallIfRegularCall(*CB, &RFI) : nullptr;
2849       if (!CB)
2850         return false;
2851       const int InitModeArgNo = 1;
2852       auto *ModeCI = dyn_cast<ConstantInt>(CB->getOperand(InitModeArgNo));
2853       return ModeCI && (ModeCI->getSExtValue() & OMP_TGT_EXEC_MODE_GENERIC);
2854     }
2855 
2856     if (C->isZero()) {
2857       // Match: 0 == llvm.nvvm.read.ptx.sreg.tid.x()
2858       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2859         if (II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_tid_x)
2860           return true;
2861 
2862       // Match: 0 == llvm.amdgcn.workitem.id.x()
2863       if (auto *II = dyn_cast<IntrinsicInst>(Cmp->getOperand(0)))
2864         if (II->getIntrinsicID() == Intrinsic::amdgcn_workitem_id_x)
2865           return true;
2866     }
2867 
2868     return false;
2869   };
2870 
2871   // Merge all the predecessor states into the current basic block. A basic
2872   // block is executed by a single thread if all of its predecessors are.
2873   auto MergePredecessorStates = [&](BasicBlock *BB) {
2874     if (pred_empty(BB))
2875       return SingleThreadedBBs.contains(BB);
2876 
2877     bool IsInitialThread = true;
2878     for (BasicBlock *PredBB : predecessors(BB)) {
2879       if (!IsInitialThreadOnly(dyn_cast<BranchInst>(PredBB->getTerminator()),
2880                                BB))
2881         IsInitialThread &= SingleThreadedBBs.contains(PredBB);
2882     }
2883 
2884     return IsInitialThread;
2885   };
2886 
2887   for (auto *BB : RPOT) {
2888     if (!MergePredecessorStates(BB))
2889       SingleThreadedBBs.remove(BB);
2890   }
2891 
2892   return (NumSingleThreadedBBs == SingleThreadedBBs.size())
2893              ? ChangeStatus::UNCHANGED
2894              : ChangeStatus::CHANGED;
2895 }
2896 
2897 /// Try to replace memory allocation calls called by a single thread with a
2898 /// static buffer of shared memory.
2899 struct AAHeapToShared : public StateWrapper<BooleanState, AbstractAttribute> {
2900   using Base = StateWrapper<BooleanState, AbstractAttribute>;
2901   AAHeapToShared(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
2902 
2903   /// Create an abstract attribute view for the position \p IRP.
2904   static AAHeapToShared &createForPosition(const IRPosition &IRP,
2905                                            Attributor &A);
2906 
2907   /// Returns true if HeapToShared conversion is assumed to be possible.
2908   virtual bool isAssumedHeapToShared(CallBase &CB) const = 0;
2909 
2910   /// Returns true if HeapToShared conversion is assumed and the CB is a
2911   /// callsite to a free operation to be removed.
2912   virtual bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const = 0;
2913 
2914   /// See AbstractAttribute::getName().
2915   const std::string getName() const override { return "AAHeapToShared"; }
2916 
2917   /// See AbstractAttribute::getIdAddr().
2918   const char *getIdAddr() const override { return &ID; }
2919 
2920   /// This function should return true if the type of the \p AA is
2921   /// AAHeapToShared.
2922   static bool classof(const AbstractAttribute *AA) {
2923     return (AA->getIdAddr() == &ID);
2924   }
2925 
2926   /// Unique ID (due to the unique address)
2927   static const char ID;
2928 };
2929 
2930 struct AAHeapToSharedFunction : public AAHeapToShared {
2931   AAHeapToSharedFunction(const IRPosition &IRP, Attributor &A)
2932       : AAHeapToShared(IRP, A) {}
2933 
2934   const std::string getAsStr() const override {
2935     return "[AAHeapToShared] " + std::to_string(MallocCalls.size()) +
2936            " malloc calls eligible.";
2937   }
2938 
2939   /// See AbstractAttribute::trackStatistics().
2940   void trackStatistics() const override {}
2941 
2942   /// This functions finds free calls that will be removed by the
2943   /// HeapToShared transformation.
2944   void findPotentialRemovedFreeCalls(Attributor &A) {
2945     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2946     auto &FreeRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2947 
2948     PotentialRemovedFreeCalls.clear();
2949     // Update free call users of found malloc calls.
2950     for (CallBase *CB : MallocCalls) {
2951       SmallVector<CallBase *, 4> FreeCalls;
2952       for (auto *U : CB->users()) {
2953         CallBase *C = dyn_cast<CallBase>(U);
2954         if (C && C->getCalledFunction() == FreeRFI.Declaration)
2955           FreeCalls.push_back(C);
2956       }
2957 
2958       if (FreeCalls.size() != 1)
2959         continue;
2960 
2961       PotentialRemovedFreeCalls.insert(FreeCalls.front());
2962     }
2963   }
2964 
2965   void initialize(Attributor &A) override {
2966     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2967     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
2968 
2969     Attributor::SimplifictionCallbackTy SCB =
2970         [](const IRPosition &, const AbstractAttribute *,
2971            bool &) -> Optional<Value *> { return nullptr; };
2972     for (User *U : RFI.Declaration->users())
2973       if (CallBase *CB = dyn_cast<CallBase>(U)) {
2974         MallocCalls.insert(CB);
2975         A.registerSimplificationCallback(IRPosition::callsite_returned(*CB),
2976                                          SCB);
2977       }
2978 
2979     findPotentialRemovedFreeCalls(A);
2980   }
2981 
2982   bool isAssumedHeapToShared(CallBase &CB) const override {
2983     return isValidState() && MallocCalls.count(&CB);
2984   }
2985 
2986   bool isAssumedHeapToSharedRemovedFree(CallBase &CB) const override {
2987     return isValidState() && PotentialRemovedFreeCalls.count(&CB);
2988   }
2989 
2990   ChangeStatus manifest(Attributor &A) override {
2991     if (MallocCalls.empty())
2992       return ChangeStatus::UNCHANGED;
2993 
2994     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
2995     auto &FreeCall = OMPInfoCache.RFIs[OMPRTL___kmpc_free_shared];
2996 
2997     Function *F = getAnchorScope();
2998     auto *HS = A.lookupAAFor<AAHeapToStack>(IRPosition::function(*F), this,
2999                                             DepClassTy::OPTIONAL);
3000 
3001     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3002     for (CallBase *CB : MallocCalls) {
3003       // Skip replacing this if HeapToStack has already claimed it.
3004       if (HS && HS->isAssumedHeapToStack(*CB))
3005         continue;
3006 
3007       // Find the unique free call to remove it.
3008       SmallVector<CallBase *, 4> FreeCalls;
3009       for (auto *U : CB->users()) {
3010         CallBase *C = dyn_cast<CallBase>(U);
3011         if (C && C->getCalledFunction() == FreeCall.Declaration)
3012           FreeCalls.push_back(C);
3013       }
3014       if (FreeCalls.size() != 1)
3015         continue;
3016 
3017       auto *AllocSize = cast<ConstantInt>(CB->getArgOperand(0));
3018 
3019       if (AllocSize->getZExtValue() + SharedMemoryUsed > SharedMemoryLimit) {
3020         LLVM_DEBUG(dbgs() << TAG << "Cannot replace call " << *CB
3021                           << " with shared memory."
3022                           << " Shared memory usage is limited to "
3023                           << SharedMemoryLimit << " bytes\n");
3024         continue;
3025       }
3026 
3027       LLVM_DEBUG(dbgs() << TAG << "Replace globalization call " << *CB
3028                         << " with " << AllocSize->getZExtValue()
3029                         << " bytes of shared memory\n");
3030 
3031       // Create a new shared memory buffer of the same size as the allocation
3032       // and replace all the uses of the original allocation with it.
3033       Module *M = CB->getModule();
3034       Type *Int8Ty = Type::getInt8Ty(M->getContext());
3035       Type *Int8ArrTy = ArrayType::get(Int8Ty, AllocSize->getZExtValue());
3036       auto *SharedMem = new GlobalVariable(
3037           *M, Int8ArrTy, /* IsConstant */ false, GlobalValue::InternalLinkage,
3038           UndefValue::get(Int8ArrTy), CB->getName() + "_shared", nullptr,
3039           GlobalValue::NotThreadLocal,
3040           static_cast<unsigned>(AddressSpace::Shared));
3041       auto *NewBuffer =
3042           ConstantExpr::getPointerCast(SharedMem, Int8Ty->getPointerTo());
3043 
3044       auto Remark = [&](OptimizationRemark OR) {
3045         return OR << "Replaced globalized variable with "
3046                   << ore::NV("SharedMemory", AllocSize->getZExtValue())
3047                   << ((AllocSize->getZExtValue() != 1) ? " bytes " : " byte ")
3048                   << "of shared memory.";
3049       };
3050       A.emitRemark<OptimizationRemark>(CB, "OMP111", Remark);
3051 
3052       MaybeAlign Alignment = CB->getRetAlign();
3053       assert(Alignment &&
3054              "HeapToShared on allocation without alignment attribute");
3055       SharedMem->setAlignment(MaybeAlign(Alignment));
3056 
3057       A.changeValueAfterManifest(*CB, *NewBuffer);
3058       A.deleteAfterManifest(*CB);
3059       A.deleteAfterManifest(*FreeCalls.front());
3060 
3061       SharedMemoryUsed += AllocSize->getZExtValue();
3062       NumBytesMovedToSharedMemory = SharedMemoryUsed;
3063       Changed = ChangeStatus::CHANGED;
3064     }
3065 
3066     return Changed;
3067   }
3068 
3069   ChangeStatus updateImpl(Attributor &A) override {
3070     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3071     auto &RFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3072     Function *F = getAnchorScope();
3073 
3074     auto NumMallocCalls = MallocCalls.size();
3075 
3076     // Only consider malloc calls executed by a single thread with a constant.
3077     for (User *U : RFI.Declaration->users()) {
3078       const auto &ED = A.getAAFor<AAExecutionDomain>(
3079           *this, IRPosition::function(*F), DepClassTy::REQUIRED);
3080       if (CallBase *CB = dyn_cast<CallBase>(U))
3081         if (!isa<ConstantInt>(CB->getArgOperand(0)) ||
3082             !ED.isExecutedByInitialThreadOnly(*CB))
3083           MallocCalls.remove(CB);
3084     }
3085 
3086     findPotentialRemovedFreeCalls(A);
3087 
3088     if (NumMallocCalls != MallocCalls.size())
3089       return ChangeStatus::CHANGED;
3090 
3091     return ChangeStatus::UNCHANGED;
3092   }
3093 
3094   /// Collection of all malloc calls in a function.
3095   SmallSetVector<CallBase *, 4> MallocCalls;
3096   /// Collection of potentially removed free calls in a function.
3097   SmallPtrSet<CallBase *, 4> PotentialRemovedFreeCalls;
3098   /// The total amount of shared memory that has been used for HeapToShared.
3099   unsigned SharedMemoryUsed = 0;
3100 };
3101 
3102 struct AAKernelInfo : public StateWrapper<KernelInfoState, AbstractAttribute> {
3103   using Base = StateWrapper<KernelInfoState, AbstractAttribute>;
3104   AAKernelInfo(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
3105 
3106   /// Statistics are tracked as part of manifest for now.
3107   void trackStatistics() const override {}
3108 
3109   /// See AbstractAttribute::getAsStr()
3110   const std::string getAsStr() const override {
3111     if (!isValidState())
3112       return "<invalid>";
3113     return std::string(SPMDCompatibilityTracker.isAssumed() ? "SPMD"
3114                                                             : "generic") +
3115            std::string(SPMDCompatibilityTracker.isAtFixpoint() ? " [FIX]"
3116                                                                : "") +
3117            std::string(" #PRs: ") +
3118            (ReachedKnownParallelRegions.isValidState()
3119                 ? std::to_string(ReachedKnownParallelRegions.size())
3120                 : "<invalid>") +
3121            ", #Unknown PRs: " +
3122            (ReachedUnknownParallelRegions.isValidState()
3123                 ? std::to_string(ReachedUnknownParallelRegions.size())
3124                 : "<invalid>") +
3125            ", #Reaching Kernels: " +
3126            (ReachingKernelEntries.isValidState()
3127                 ? std::to_string(ReachingKernelEntries.size())
3128                 : "<invalid>");
3129   }
3130 
3131   /// Create an abstract attribute biew for the position \p IRP.
3132   static AAKernelInfo &createForPosition(const IRPosition &IRP, Attributor &A);
3133 
3134   /// See AbstractAttribute::getName()
3135   const std::string getName() const override { return "AAKernelInfo"; }
3136 
3137   /// See AbstractAttribute::getIdAddr()
3138   const char *getIdAddr() const override { return &ID; }
3139 
3140   /// This function should return true if the type of the \p AA is AAKernelInfo
3141   static bool classof(const AbstractAttribute *AA) {
3142     return (AA->getIdAddr() == &ID);
3143   }
3144 
3145   static const char ID;
3146 };
3147 
3148 /// The function kernel info abstract attribute, basically, what can we say
3149 /// about a function with regards to the KernelInfoState.
3150 struct AAKernelInfoFunction : AAKernelInfo {
3151   AAKernelInfoFunction(const IRPosition &IRP, Attributor &A)
3152       : AAKernelInfo(IRP, A) {}
3153 
3154   SmallPtrSet<Instruction *, 4> GuardedInstructions;
3155 
3156   SmallPtrSetImpl<Instruction *> &getGuardedInstructions() {
3157     return GuardedInstructions;
3158   }
3159 
3160   /// See AbstractAttribute::initialize(...).
3161   void initialize(Attributor &A) override {
3162     // This is a high-level transform that might change the constant arguments
3163     // of the init and dinit calls. We need to tell the Attributor about this
3164     // to avoid other parts using the current constant value for simpliication.
3165     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3166 
3167     Function *Fn = getAnchorScope();
3168     if (!OMPInfoCache.Kernels.count(Fn))
3169       return;
3170 
3171     // Add itself to the reaching kernel and set IsKernelEntry.
3172     ReachingKernelEntries.insert(Fn);
3173     IsKernelEntry = true;
3174 
3175     OMPInformationCache::RuntimeFunctionInfo &InitRFI =
3176         OMPInfoCache.RFIs[OMPRTL___kmpc_target_init];
3177     OMPInformationCache::RuntimeFunctionInfo &DeinitRFI =
3178         OMPInfoCache.RFIs[OMPRTL___kmpc_target_deinit];
3179 
3180     // For kernels we perform more initialization work, first we find the init
3181     // and deinit calls.
3182     auto StoreCallBase = [](Use &U,
3183                             OMPInformationCache::RuntimeFunctionInfo &RFI,
3184                             CallBase *&Storage) {
3185       CallBase *CB = OpenMPOpt::getCallIfRegularCall(U, &RFI);
3186       assert(CB &&
3187              "Unexpected use of __kmpc_target_init or __kmpc_target_deinit!");
3188       assert(!Storage &&
3189              "Multiple uses of __kmpc_target_init or __kmpc_target_deinit!");
3190       Storage = CB;
3191       return false;
3192     };
3193     InitRFI.foreachUse(
3194         [&](Use &U, Function &) {
3195           StoreCallBase(U, InitRFI, KernelInitCB);
3196           return false;
3197         },
3198         Fn);
3199     DeinitRFI.foreachUse(
3200         [&](Use &U, Function &) {
3201           StoreCallBase(U, DeinitRFI, KernelDeinitCB);
3202           return false;
3203         },
3204         Fn);
3205 
3206     // Ignore kernels without initializers such as global constructors.
3207     if (!KernelInitCB || !KernelDeinitCB) {
3208       indicateOptimisticFixpoint();
3209       return;
3210     }
3211 
3212     // For kernels we might need to initialize/finalize the IsSPMD state and
3213     // we need to register a simplification callback so that the Attributor
3214     // knows the constant arguments to __kmpc_target_init and
3215     // __kmpc_target_deinit might actually change.
3216 
3217     Attributor::SimplifictionCallbackTy StateMachineSimplifyCB =
3218         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3219             bool &UsedAssumedInformation) -> Optional<Value *> {
3220       // IRP represents the "use generic state machine" argument of an
3221       // __kmpc_target_init call. We will answer this one with the internal
3222       // state. As long as we are not in an invalid state, we will create a
3223       // custom state machine so the value should be a `i1 false`. If we are
3224       // in an invalid state, we won't change the value that is in the IR.
3225       if (!ReachedKnownParallelRegions.isValidState())
3226         return nullptr;
3227       // If we have disabled state machine rewrites, don't make a custom one.
3228       if (DisableOpenMPOptStateMachineRewrite)
3229         return nullptr;
3230       if (AA)
3231         A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3232       UsedAssumedInformation = !isAtFixpoint();
3233       auto *FalseVal =
3234           ConstantInt::getBool(IRP.getAnchorValue().getContext(), false);
3235       return FalseVal;
3236     };
3237 
3238     Attributor::SimplifictionCallbackTy ModeSimplifyCB =
3239         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3240             bool &UsedAssumedInformation) -> Optional<Value *> {
3241       // IRP represents the "SPMDCompatibilityTracker" argument of an
3242       // __kmpc_target_init or
3243       // __kmpc_target_deinit call. We will answer this one with the internal
3244       // state.
3245       if (!SPMDCompatibilityTracker.isValidState())
3246         return nullptr;
3247       if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3248         if (AA)
3249           A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3250         UsedAssumedInformation = true;
3251       } else {
3252         UsedAssumedInformation = false;
3253       }
3254       auto *Val = ConstantInt::getSigned(
3255           IntegerType::getInt8Ty(IRP.getAnchorValue().getContext()),
3256           SPMDCompatibilityTracker.isAssumed() ? OMP_TGT_EXEC_MODE_SPMD
3257                                                : OMP_TGT_EXEC_MODE_GENERIC);
3258       return Val;
3259     };
3260 
3261     Attributor::SimplifictionCallbackTy IsGenericModeSimplifyCB =
3262         [&](const IRPosition &IRP, const AbstractAttribute *AA,
3263             bool &UsedAssumedInformation) -> Optional<Value *> {
3264       // IRP represents the "RequiresFullRuntime" argument of an
3265       // __kmpc_target_init or __kmpc_target_deinit call. We will answer this
3266       // one with the internal state of the SPMDCompatibilityTracker, so if
3267       // generic then true, if SPMD then false.
3268       if (!SPMDCompatibilityTracker.isValidState())
3269         return nullptr;
3270       if (!SPMDCompatibilityTracker.isAtFixpoint()) {
3271         if (AA)
3272           A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
3273         UsedAssumedInformation = true;
3274       } else {
3275         UsedAssumedInformation = false;
3276       }
3277       auto *Val = ConstantInt::getBool(IRP.getAnchorValue().getContext(),
3278                                        !SPMDCompatibilityTracker.isAssumed());
3279       return Val;
3280     };
3281 
3282     constexpr const int InitModeArgNo = 1;
3283     constexpr const int DeinitModeArgNo = 1;
3284     constexpr const int InitUseStateMachineArgNo = 2;
3285     constexpr const int InitRequiresFullRuntimeArgNo = 3;
3286     constexpr const int DeinitRequiresFullRuntimeArgNo = 2;
3287     A.registerSimplificationCallback(
3288         IRPosition::callsite_argument(*KernelInitCB, InitUseStateMachineArgNo),
3289         StateMachineSimplifyCB);
3290     A.registerSimplificationCallback(
3291         IRPosition::callsite_argument(*KernelInitCB, InitModeArgNo),
3292         ModeSimplifyCB);
3293     A.registerSimplificationCallback(
3294         IRPosition::callsite_argument(*KernelDeinitCB, DeinitModeArgNo),
3295         ModeSimplifyCB);
3296     A.registerSimplificationCallback(
3297         IRPosition::callsite_argument(*KernelInitCB,
3298                                       InitRequiresFullRuntimeArgNo),
3299         IsGenericModeSimplifyCB);
3300     A.registerSimplificationCallback(
3301         IRPosition::callsite_argument(*KernelDeinitCB,
3302                                       DeinitRequiresFullRuntimeArgNo),
3303         IsGenericModeSimplifyCB);
3304 
3305     // Check if we know we are in SPMD-mode already.
3306     ConstantInt *ModeArg =
3307         dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3308     if (ModeArg && (ModeArg->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3309       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
3310     // This is a generic region but SPMDization is disabled so stop tracking.
3311     else if (DisableOpenMPOptSPMDization)
3312       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
3313   }
3314 
3315   /// Sanitize the string \p S such that it is a suitable global symbol name.
3316   static std::string sanitizeForGlobalName(std::string S) {
3317     std::replace_if(
3318         S.begin(), S.end(),
3319         [](const char C) {
3320           return !((C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
3321                    (C >= '0' && C <= '9') || C == '_');
3322         },
3323         '.');
3324     return S;
3325   }
3326 
3327   /// Modify the IR based on the KernelInfoState as the fixpoint iteration is
3328   /// finished now.
3329   ChangeStatus manifest(Attributor &A) override {
3330     // If we are not looking at a kernel with __kmpc_target_init and
3331     // __kmpc_target_deinit call we cannot actually manifest the information.
3332     if (!KernelInitCB || !KernelDeinitCB)
3333       return ChangeStatus::UNCHANGED;
3334 
3335     // If we can we change the execution mode to SPMD-mode otherwise we build a
3336     // custom state machine.
3337     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3338     if (!changeToSPMDMode(A, Changed))
3339       return buildCustomStateMachine(A);
3340 
3341     return Changed;
3342   }
3343 
3344   bool changeToSPMDMode(Attributor &A, ChangeStatus &Changed) {
3345     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3346 
3347     if (!SPMDCompatibilityTracker.isAssumed()) {
3348       for (Instruction *NonCompatibleI : SPMDCompatibilityTracker) {
3349         if (!NonCompatibleI)
3350           continue;
3351 
3352         // Skip diagnostics on calls to known OpenMP runtime functions for now.
3353         if (auto *CB = dyn_cast<CallBase>(NonCompatibleI))
3354           if (OMPInfoCache.RTLFunctions.contains(CB->getCalledFunction()))
3355             continue;
3356 
3357         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3358           ORA << "Value has potential side effects preventing SPMD-mode "
3359                  "execution";
3360           if (isa<CallBase>(NonCompatibleI)) {
3361             ORA << ". Add `__attribute__((assume(\"ompx_spmd_amenable\")))` to "
3362                    "the called function to override";
3363           }
3364           return ORA << ".";
3365         };
3366         A.emitRemark<OptimizationRemarkAnalysis>(NonCompatibleI, "OMP121",
3367                                                  Remark);
3368 
3369         LLVM_DEBUG(dbgs() << TAG << "SPMD-incompatible side-effect: "
3370                           << *NonCompatibleI << "\n");
3371       }
3372 
3373       return false;
3374     }
3375 
3376     // Check if the kernel is already in SPMD mode, if so, return success.
3377     Function *Kernel = getAnchorScope();
3378     GlobalVariable *ExecMode = Kernel->getParent()->getGlobalVariable(
3379         (Kernel->getName() + "_exec_mode").str());
3380     assert(ExecMode && "Kernel without exec mode?");
3381     assert(ExecMode->getInitializer() && "ExecMode doesn't have initializer!");
3382 
3383     // Set the global exec mode flag to indicate SPMD-Generic mode.
3384     assert(isa<ConstantInt>(ExecMode->getInitializer()) &&
3385            "ExecMode is not an integer!");
3386     const int8_t ExecModeVal =
3387         cast<ConstantInt>(ExecMode->getInitializer())->getSExtValue();
3388     if (ExecModeVal != OMP_TGT_EXEC_MODE_GENERIC)
3389       return true;
3390 
3391     // We will now unconditionally modify the IR, indicate a change.
3392     Changed = ChangeStatus::CHANGED;
3393 
3394     auto CreateGuardedRegion = [&](Instruction *RegionStartI,
3395                                    Instruction *RegionEndI) {
3396       LoopInfo *LI = nullptr;
3397       DominatorTree *DT = nullptr;
3398       MemorySSAUpdater *MSU = nullptr;
3399       using InsertPointTy = OpenMPIRBuilder::InsertPointTy;
3400 
3401       BasicBlock *ParentBB = RegionStartI->getParent();
3402       Function *Fn = ParentBB->getParent();
3403       Module &M = *Fn->getParent();
3404 
3405       // Create all the blocks and logic.
3406       // ParentBB:
3407       //    goto RegionCheckTidBB
3408       // RegionCheckTidBB:
3409       //    Tid = __kmpc_hardware_thread_id()
3410       //    if (Tid != 0)
3411       //        goto RegionBarrierBB
3412       // RegionStartBB:
3413       //    <execute instructions guarded>
3414       //    goto RegionEndBB
3415       // RegionEndBB:
3416       //    <store escaping values to shared mem>
3417       //    goto RegionBarrierBB
3418       //  RegionBarrierBB:
3419       //    __kmpc_simple_barrier_spmd()
3420       //    // second barrier is omitted if lacking escaping values.
3421       //    <load escaping values from shared mem>
3422       //    __kmpc_simple_barrier_spmd()
3423       //    goto RegionExitBB
3424       // RegionExitBB:
3425       //    <execute rest of instructions>
3426 
3427       BasicBlock *RegionEndBB = SplitBlock(ParentBB, RegionEndI->getNextNode(),
3428                                            DT, LI, MSU, "region.guarded.end");
3429       BasicBlock *RegionBarrierBB =
3430           SplitBlock(RegionEndBB, &*RegionEndBB->getFirstInsertionPt(), DT, LI,
3431                      MSU, "region.barrier");
3432       BasicBlock *RegionExitBB =
3433           SplitBlock(RegionBarrierBB, &*RegionBarrierBB->getFirstInsertionPt(),
3434                      DT, LI, MSU, "region.exit");
3435       BasicBlock *RegionStartBB =
3436           SplitBlock(ParentBB, RegionStartI, DT, LI, MSU, "region.guarded");
3437 
3438       assert(ParentBB->getUniqueSuccessor() == RegionStartBB &&
3439              "Expected a different CFG");
3440 
3441       BasicBlock *RegionCheckTidBB = SplitBlock(
3442           ParentBB, ParentBB->getTerminator(), DT, LI, MSU, "region.check.tid");
3443 
3444       // Register basic blocks with the Attributor.
3445       A.registerManifestAddedBasicBlock(*RegionEndBB);
3446       A.registerManifestAddedBasicBlock(*RegionBarrierBB);
3447       A.registerManifestAddedBasicBlock(*RegionExitBB);
3448       A.registerManifestAddedBasicBlock(*RegionStartBB);
3449       A.registerManifestAddedBasicBlock(*RegionCheckTidBB);
3450 
3451       bool HasBroadcastValues = false;
3452       // Find escaping outputs from the guarded region to outside users and
3453       // broadcast their values to them.
3454       for (Instruction &I : *RegionStartBB) {
3455         SmallPtrSet<Instruction *, 4> OutsideUsers;
3456         for (User *Usr : I.users()) {
3457           Instruction &UsrI = *cast<Instruction>(Usr);
3458           if (UsrI.getParent() != RegionStartBB)
3459             OutsideUsers.insert(&UsrI);
3460         }
3461 
3462         if (OutsideUsers.empty())
3463           continue;
3464 
3465         HasBroadcastValues = true;
3466 
3467         // Emit a global variable in shared memory to store the broadcasted
3468         // value.
3469         auto *SharedMem = new GlobalVariable(
3470             M, I.getType(), /* IsConstant */ false,
3471             GlobalValue::InternalLinkage, UndefValue::get(I.getType()),
3472             sanitizeForGlobalName(
3473                 (I.getName() + ".guarded.output.alloc").str()),
3474             nullptr, GlobalValue::NotThreadLocal,
3475             static_cast<unsigned>(AddressSpace::Shared));
3476 
3477         // Emit a store instruction to update the value.
3478         new StoreInst(&I, SharedMem, RegionEndBB->getTerminator());
3479 
3480         LoadInst *LoadI = new LoadInst(I.getType(), SharedMem,
3481                                        I.getName() + ".guarded.output.load",
3482                                        RegionBarrierBB->getTerminator());
3483 
3484         // Emit a load instruction and replace uses of the output value.
3485         for (Instruction *UsrI : OutsideUsers)
3486           UsrI->replaceUsesOfWith(&I, LoadI);
3487       }
3488 
3489       auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3490 
3491       // Go to tid check BB in ParentBB.
3492       const DebugLoc DL = ParentBB->getTerminator()->getDebugLoc();
3493       ParentBB->getTerminator()->eraseFromParent();
3494       OpenMPIRBuilder::LocationDescription Loc(
3495           InsertPointTy(ParentBB, ParentBB->end()), DL);
3496       OMPInfoCache.OMPBuilder.updateToLocation(Loc);
3497       uint32_t SrcLocStrSize;
3498       auto *SrcLocStr =
3499           OMPInfoCache.OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize);
3500       Value *Ident =
3501           OMPInfoCache.OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize);
3502       BranchInst::Create(RegionCheckTidBB, ParentBB)->setDebugLoc(DL);
3503 
3504       // Add check for Tid in RegionCheckTidBB
3505       RegionCheckTidBB->getTerminator()->eraseFromParent();
3506       OpenMPIRBuilder::LocationDescription LocRegionCheckTid(
3507           InsertPointTy(RegionCheckTidBB, RegionCheckTidBB->end()), DL);
3508       OMPInfoCache.OMPBuilder.updateToLocation(LocRegionCheckTid);
3509       FunctionCallee HardwareTidFn =
3510           OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3511               M, OMPRTL___kmpc_get_hardware_thread_id_in_block);
3512       CallInst *Tid =
3513           OMPInfoCache.OMPBuilder.Builder.CreateCall(HardwareTidFn, {});
3514       Tid->setDebugLoc(DL);
3515       OMPInfoCache.setCallingConvention(HardwareTidFn, Tid);
3516       Value *TidCheck = OMPInfoCache.OMPBuilder.Builder.CreateIsNull(Tid);
3517       OMPInfoCache.OMPBuilder.Builder
3518           .CreateCondBr(TidCheck, RegionStartBB, RegionBarrierBB)
3519           ->setDebugLoc(DL);
3520 
3521       // First barrier for synchronization, ensures main thread has updated
3522       // values.
3523       FunctionCallee BarrierFn =
3524           OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3525               M, OMPRTL___kmpc_barrier_simple_spmd);
3526       OMPInfoCache.OMPBuilder.updateToLocation(InsertPointTy(
3527           RegionBarrierBB, RegionBarrierBB->getFirstInsertionPt()));
3528       CallInst *Barrier =
3529           OMPInfoCache.OMPBuilder.Builder.CreateCall(BarrierFn, {Ident, Tid});
3530       Barrier->setDebugLoc(DL);
3531       OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3532 
3533       // Second barrier ensures workers have read broadcast values.
3534       if (HasBroadcastValues) {
3535         CallInst *Barrier = CallInst::Create(BarrierFn, {Ident, Tid}, "",
3536                                              RegionBarrierBB->getTerminator());
3537         Barrier->setDebugLoc(DL);
3538         OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3539       }
3540     };
3541 
3542     auto &AllocSharedRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
3543     SmallPtrSet<BasicBlock *, 8> Visited;
3544     for (Instruction *GuardedI : SPMDCompatibilityTracker) {
3545       BasicBlock *BB = GuardedI->getParent();
3546       if (!Visited.insert(BB).second)
3547         continue;
3548 
3549       SmallVector<std::pair<Instruction *, Instruction *>> Reorders;
3550       Instruction *LastEffect = nullptr;
3551       BasicBlock::reverse_iterator IP = BB->rbegin(), IPEnd = BB->rend();
3552       while (++IP != IPEnd) {
3553         if (!IP->mayHaveSideEffects() && !IP->mayReadFromMemory())
3554           continue;
3555         Instruction *I = &*IP;
3556         if (OpenMPOpt::getCallIfRegularCall(*I, &AllocSharedRFI))
3557           continue;
3558         if (!I->user_empty() || !SPMDCompatibilityTracker.contains(I)) {
3559           LastEffect = nullptr;
3560           continue;
3561         }
3562         if (LastEffect)
3563           Reorders.push_back({I, LastEffect});
3564         LastEffect = &*IP;
3565       }
3566       for (auto &Reorder : Reorders)
3567         Reorder.first->moveBefore(Reorder.second);
3568     }
3569 
3570     SmallVector<std::pair<Instruction *, Instruction *>, 4> GuardedRegions;
3571 
3572     for (Instruction *GuardedI : SPMDCompatibilityTracker) {
3573       BasicBlock *BB = GuardedI->getParent();
3574       auto *CalleeAA = A.lookupAAFor<AAKernelInfo>(
3575           IRPosition::function(*GuardedI->getFunction()), nullptr,
3576           DepClassTy::NONE);
3577       assert(CalleeAA != nullptr && "Expected Callee AAKernelInfo");
3578       auto &CalleeAAFunction = *cast<AAKernelInfoFunction>(CalleeAA);
3579       // Continue if instruction is already guarded.
3580       if (CalleeAAFunction.getGuardedInstructions().contains(GuardedI))
3581         continue;
3582 
3583       Instruction *GuardedRegionStart = nullptr, *GuardedRegionEnd = nullptr;
3584       for (Instruction &I : *BB) {
3585         // If instruction I needs to be guarded update the guarded region
3586         // bounds.
3587         if (SPMDCompatibilityTracker.contains(&I)) {
3588           CalleeAAFunction.getGuardedInstructions().insert(&I);
3589           if (GuardedRegionStart)
3590             GuardedRegionEnd = &I;
3591           else
3592             GuardedRegionStart = GuardedRegionEnd = &I;
3593 
3594           continue;
3595         }
3596 
3597         // Instruction I does not need guarding, store
3598         // any region found and reset bounds.
3599         if (GuardedRegionStart) {
3600           GuardedRegions.push_back(
3601               std::make_pair(GuardedRegionStart, GuardedRegionEnd));
3602           GuardedRegionStart = nullptr;
3603           GuardedRegionEnd = nullptr;
3604         }
3605       }
3606     }
3607 
3608     for (auto &GR : GuardedRegions)
3609       CreateGuardedRegion(GR.first, GR.second);
3610 
3611     // Adjust the global exec mode flag that tells the runtime what mode this
3612     // kernel is executed in.
3613     assert(ExecModeVal == OMP_TGT_EXEC_MODE_GENERIC &&
3614            "Initially non-SPMD kernel has SPMD exec mode!");
3615     ExecMode->setInitializer(
3616         ConstantInt::get(ExecMode->getInitializer()->getType(),
3617                          ExecModeVal | OMP_TGT_EXEC_MODE_GENERIC_SPMD));
3618 
3619     // Next rewrite the init and deinit calls to indicate we use SPMD-mode now.
3620     const int InitModeArgNo = 1;
3621     const int DeinitModeArgNo = 1;
3622     const int InitUseStateMachineArgNo = 2;
3623     const int InitRequiresFullRuntimeArgNo = 3;
3624     const int DeinitRequiresFullRuntimeArgNo = 2;
3625 
3626     auto &Ctx = getAnchorValue().getContext();
3627     A.changeUseAfterManifest(
3628         KernelInitCB->getArgOperandUse(InitModeArgNo),
3629         *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
3630                                 OMP_TGT_EXEC_MODE_SPMD));
3631     A.changeUseAfterManifest(
3632         KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo),
3633         *ConstantInt::getBool(Ctx, false));
3634     A.changeUseAfterManifest(
3635         KernelDeinitCB->getArgOperandUse(DeinitModeArgNo),
3636         *ConstantInt::getSigned(IntegerType::getInt8Ty(Ctx),
3637                                 OMP_TGT_EXEC_MODE_SPMD));
3638     A.changeUseAfterManifest(
3639         KernelInitCB->getArgOperandUse(InitRequiresFullRuntimeArgNo),
3640         *ConstantInt::getBool(Ctx, false));
3641     A.changeUseAfterManifest(
3642         KernelDeinitCB->getArgOperandUse(DeinitRequiresFullRuntimeArgNo),
3643         *ConstantInt::getBool(Ctx, false));
3644 
3645     ++NumOpenMPTargetRegionKernelsSPMD;
3646 
3647     auto Remark = [&](OptimizationRemark OR) {
3648       return OR << "Transformed generic-mode kernel to SPMD-mode.";
3649     };
3650     A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP120", Remark);
3651     return true;
3652   };
3653 
3654   ChangeStatus buildCustomStateMachine(Attributor &A) {
3655     // If we have disabled state machine rewrites, don't make a custom one
3656     if (DisableOpenMPOptStateMachineRewrite)
3657       return ChangeStatus::UNCHANGED;
3658 
3659     // Don't rewrite the state machine if we are not in a valid state.
3660     if (!ReachedKnownParallelRegions.isValidState())
3661       return ChangeStatus::UNCHANGED;
3662 
3663     const int InitModeArgNo = 1;
3664     const int InitUseStateMachineArgNo = 2;
3665 
3666     // Check if the current configuration is non-SPMD and generic state machine.
3667     // If we already have SPMD mode or a custom state machine we do not need to
3668     // go any further. If it is anything but a constant something is weird and
3669     // we give up.
3670     ConstantInt *UseStateMachine = dyn_cast<ConstantInt>(
3671         KernelInitCB->getArgOperand(InitUseStateMachineArgNo));
3672     ConstantInt *Mode =
3673         dyn_cast<ConstantInt>(KernelInitCB->getArgOperand(InitModeArgNo));
3674 
3675     // If we are stuck with generic mode, try to create a custom device (=GPU)
3676     // state machine which is specialized for the parallel regions that are
3677     // reachable by the kernel.
3678     if (!UseStateMachine || UseStateMachine->isZero() || !Mode ||
3679         (Mode->getSExtValue() & OMP_TGT_EXEC_MODE_SPMD))
3680       return ChangeStatus::UNCHANGED;
3681 
3682     // If not SPMD mode, indicate we use a custom state machine now.
3683     auto &Ctx = getAnchorValue().getContext();
3684     auto *FalseVal = ConstantInt::getBool(Ctx, false);
3685     A.changeUseAfterManifest(
3686         KernelInitCB->getArgOperandUse(InitUseStateMachineArgNo), *FalseVal);
3687 
3688     // If we don't actually need a state machine we are done here. This can
3689     // happen if there simply are no parallel regions. In the resulting kernel
3690     // all worker threads will simply exit right away, leaving the main thread
3691     // to do the work alone.
3692     if (!mayContainParallelRegion()) {
3693       ++NumOpenMPTargetRegionKernelsWithoutStateMachine;
3694 
3695       auto Remark = [&](OptimizationRemark OR) {
3696         return OR << "Removing unused state machine from generic-mode kernel.";
3697       };
3698       A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP130", Remark);
3699 
3700       return ChangeStatus::CHANGED;
3701     }
3702 
3703     // Keep track in the statistics of our new shiny custom state machine.
3704     if (ReachedUnknownParallelRegions.empty()) {
3705       ++NumOpenMPTargetRegionKernelsCustomStateMachineWithoutFallback;
3706 
3707       auto Remark = [&](OptimizationRemark OR) {
3708         return OR << "Rewriting generic-mode kernel with a customized state "
3709                      "machine.";
3710       };
3711       A.emitRemark<OptimizationRemark>(KernelInitCB, "OMP131", Remark);
3712     } else {
3713       ++NumOpenMPTargetRegionKernelsCustomStateMachineWithFallback;
3714 
3715       auto Remark = [&](OptimizationRemarkAnalysis OR) {
3716         return OR << "Generic-mode kernel is executed with a customized state "
3717                      "machine that requires a fallback.";
3718       };
3719       A.emitRemark<OptimizationRemarkAnalysis>(KernelInitCB, "OMP132", Remark);
3720 
3721       // Tell the user why we ended up with a fallback.
3722       for (CallBase *UnknownParallelRegionCB : ReachedUnknownParallelRegions) {
3723         if (!UnknownParallelRegionCB)
3724           continue;
3725         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
3726           return ORA << "Call may contain unknown parallel regions. Use "
3727                      << "`__attribute__((assume(\"omp_no_parallelism\")))` to "
3728                         "override.";
3729         };
3730         A.emitRemark<OptimizationRemarkAnalysis>(UnknownParallelRegionCB,
3731                                                  "OMP133", Remark);
3732       }
3733     }
3734 
3735     // Create all the blocks:
3736     //
3737     //                       InitCB = __kmpc_target_init(...)
3738     //                       BlockHwSize =
3739     //                         __kmpc_get_hardware_num_threads_in_block();
3740     //                       WarpSize = __kmpc_get_warp_size();
3741     //                       BlockSize = BlockHwSize - WarpSize;
3742     // IsWorkerCheckBB:      bool IsWorker = InitCB != -1;
3743     //                       if (IsWorker) {
3744     //                         if (InitCB >= BlockSize) return;
3745     // SMBeginBB:               __kmpc_barrier_simple_generic(...);
3746     //                         void *WorkFn;
3747     //                         bool Active = __kmpc_kernel_parallel(&WorkFn);
3748     //                         if (!WorkFn) return;
3749     // SMIsActiveCheckBB:       if (Active) {
3750     // SMIfCascadeCurrentBB:      if      (WorkFn == <ParFn0>)
3751     //                              ParFn0(...);
3752     // SMIfCascadeCurrentBB:      else if (WorkFn == <ParFn1>)
3753     //                              ParFn1(...);
3754     //                            ...
3755     // SMIfCascadeCurrentBB:      else
3756     //                              ((WorkFnTy*)WorkFn)(...);
3757     // SMEndParallelBB:           __kmpc_kernel_end_parallel(...);
3758     //                          }
3759     // SMDoneBB:                __kmpc_barrier_simple_generic(...);
3760     //                          goto SMBeginBB;
3761     //                       }
3762     // UserCodeEntryBB:      // user code
3763     //                       __kmpc_target_deinit(...)
3764     //
3765     Function *Kernel = getAssociatedFunction();
3766     assert(Kernel && "Expected an associated function!");
3767 
3768     BasicBlock *InitBB = KernelInitCB->getParent();
3769     BasicBlock *UserCodeEntryBB = InitBB->splitBasicBlock(
3770         KernelInitCB->getNextNode(), "thread.user_code.check");
3771     BasicBlock *IsWorkerCheckBB =
3772         BasicBlock::Create(Ctx, "is_worker_check", Kernel, UserCodeEntryBB);
3773     BasicBlock *StateMachineBeginBB = BasicBlock::Create(
3774         Ctx, "worker_state_machine.begin", Kernel, UserCodeEntryBB);
3775     BasicBlock *StateMachineFinishedBB = BasicBlock::Create(
3776         Ctx, "worker_state_machine.finished", Kernel, UserCodeEntryBB);
3777     BasicBlock *StateMachineIsActiveCheckBB = BasicBlock::Create(
3778         Ctx, "worker_state_machine.is_active.check", Kernel, UserCodeEntryBB);
3779     BasicBlock *StateMachineIfCascadeCurrentBB =
3780         BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check",
3781                            Kernel, UserCodeEntryBB);
3782     BasicBlock *StateMachineEndParallelBB =
3783         BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.end",
3784                            Kernel, UserCodeEntryBB);
3785     BasicBlock *StateMachineDoneBarrierBB = BasicBlock::Create(
3786         Ctx, "worker_state_machine.done.barrier", Kernel, UserCodeEntryBB);
3787     A.registerManifestAddedBasicBlock(*InitBB);
3788     A.registerManifestAddedBasicBlock(*UserCodeEntryBB);
3789     A.registerManifestAddedBasicBlock(*IsWorkerCheckBB);
3790     A.registerManifestAddedBasicBlock(*StateMachineBeginBB);
3791     A.registerManifestAddedBasicBlock(*StateMachineFinishedBB);
3792     A.registerManifestAddedBasicBlock(*StateMachineIsActiveCheckBB);
3793     A.registerManifestAddedBasicBlock(*StateMachineIfCascadeCurrentBB);
3794     A.registerManifestAddedBasicBlock(*StateMachineEndParallelBB);
3795     A.registerManifestAddedBasicBlock(*StateMachineDoneBarrierBB);
3796 
3797     const DebugLoc &DLoc = KernelInitCB->getDebugLoc();
3798     ReturnInst::Create(Ctx, StateMachineFinishedBB)->setDebugLoc(DLoc);
3799     InitBB->getTerminator()->eraseFromParent();
3800 
3801     Instruction *IsWorker =
3802         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_NE, KernelInitCB,
3803                          ConstantInt::get(KernelInitCB->getType(), -1),
3804                          "thread.is_worker", InitBB);
3805     IsWorker->setDebugLoc(DLoc);
3806     BranchInst::Create(IsWorkerCheckBB, UserCodeEntryBB, IsWorker, InitBB);
3807 
3808     Module &M = *Kernel->getParent();
3809     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
3810     FunctionCallee BlockHwSizeFn =
3811         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3812             M, OMPRTL___kmpc_get_hardware_num_threads_in_block);
3813     FunctionCallee WarpSizeFn =
3814         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3815             M, OMPRTL___kmpc_get_warp_size);
3816     CallInst *BlockHwSize =
3817         CallInst::Create(BlockHwSizeFn, "block.hw_size", IsWorkerCheckBB);
3818     OMPInfoCache.setCallingConvention(BlockHwSizeFn, BlockHwSize);
3819     BlockHwSize->setDebugLoc(DLoc);
3820     CallInst *WarpSize =
3821         CallInst::Create(WarpSizeFn, "warp.size", IsWorkerCheckBB);
3822     OMPInfoCache.setCallingConvention(WarpSizeFn, WarpSize);
3823     WarpSize->setDebugLoc(DLoc);
3824     Instruction *BlockSize = BinaryOperator::CreateSub(
3825         BlockHwSize, WarpSize, "block.size", IsWorkerCheckBB);
3826     BlockSize->setDebugLoc(DLoc);
3827     Instruction *IsMainOrWorker = ICmpInst::Create(
3828         ICmpInst::ICmp, llvm::CmpInst::ICMP_SLT, KernelInitCB, BlockSize,
3829         "thread.is_main_or_worker", IsWorkerCheckBB);
3830     IsMainOrWorker->setDebugLoc(DLoc);
3831     BranchInst::Create(StateMachineBeginBB, StateMachineFinishedBB,
3832                        IsMainOrWorker, IsWorkerCheckBB);
3833 
3834     // Create local storage for the work function pointer.
3835     const DataLayout &DL = M.getDataLayout();
3836     Type *VoidPtrTy = Type::getInt8PtrTy(Ctx);
3837     Instruction *WorkFnAI =
3838         new AllocaInst(VoidPtrTy, DL.getAllocaAddrSpace(), nullptr,
3839                        "worker.work_fn.addr", &Kernel->getEntryBlock().front());
3840     WorkFnAI->setDebugLoc(DLoc);
3841 
3842     OMPInfoCache.OMPBuilder.updateToLocation(
3843         OpenMPIRBuilder::LocationDescription(
3844             IRBuilder<>::InsertPoint(StateMachineBeginBB,
3845                                      StateMachineBeginBB->end()),
3846             DLoc));
3847 
3848     Value *Ident = KernelInitCB->getArgOperand(0);
3849     Value *GTid = KernelInitCB;
3850 
3851     FunctionCallee BarrierFn =
3852         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3853             M, OMPRTL___kmpc_barrier_simple_generic);
3854     CallInst *Barrier =
3855         CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineBeginBB);
3856     OMPInfoCache.setCallingConvention(BarrierFn, Barrier);
3857     Barrier->setDebugLoc(DLoc);
3858 
3859     if (WorkFnAI->getType()->getPointerAddressSpace() !=
3860         (unsigned int)AddressSpace::Generic) {
3861       WorkFnAI = new AddrSpaceCastInst(
3862           WorkFnAI,
3863           PointerType::getWithSamePointeeType(
3864               cast<PointerType>(WorkFnAI->getType()),
3865               (unsigned int)AddressSpace::Generic),
3866           WorkFnAI->getName() + ".generic", StateMachineBeginBB);
3867       WorkFnAI->setDebugLoc(DLoc);
3868     }
3869 
3870     FunctionCallee KernelParallelFn =
3871         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3872             M, OMPRTL___kmpc_kernel_parallel);
3873     CallInst *IsActiveWorker = CallInst::Create(
3874         KernelParallelFn, {WorkFnAI}, "worker.is_active", StateMachineBeginBB);
3875     OMPInfoCache.setCallingConvention(KernelParallelFn, IsActiveWorker);
3876     IsActiveWorker->setDebugLoc(DLoc);
3877     Instruction *WorkFn = new LoadInst(VoidPtrTy, WorkFnAI, "worker.work_fn",
3878                                        StateMachineBeginBB);
3879     WorkFn->setDebugLoc(DLoc);
3880 
3881     FunctionType *ParallelRegionFnTy = FunctionType::get(
3882         Type::getVoidTy(Ctx), {Type::getInt16Ty(Ctx), Type::getInt32Ty(Ctx)},
3883         false);
3884     Value *WorkFnCast = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
3885         WorkFn, ParallelRegionFnTy->getPointerTo(), "worker.work_fn.addr_cast",
3886         StateMachineBeginBB);
3887 
3888     Instruction *IsDone =
3889         ICmpInst::Create(ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFn,
3890                          Constant::getNullValue(VoidPtrTy), "worker.is_done",
3891                          StateMachineBeginBB);
3892     IsDone->setDebugLoc(DLoc);
3893     BranchInst::Create(StateMachineFinishedBB, StateMachineIsActiveCheckBB,
3894                        IsDone, StateMachineBeginBB)
3895         ->setDebugLoc(DLoc);
3896 
3897     BranchInst::Create(StateMachineIfCascadeCurrentBB,
3898                        StateMachineDoneBarrierBB, IsActiveWorker,
3899                        StateMachineIsActiveCheckBB)
3900         ->setDebugLoc(DLoc);
3901 
3902     Value *ZeroArg =
3903         Constant::getNullValue(ParallelRegionFnTy->getParamType(0));
3904 
3905     // Now that we have most of the CFG skeleton it is time for the if-cascade
3906     // that checks the function pointer we got from the runtime against the
3907     // parallel regions we expect, if there are any.
3908     for (int I = 0, E = ReachedKnownParallelRegions.size(); I < E; ++I) {
3909       auto *ParallelRegion = ReachedKnownParallelRegions[I];
3910       BasicBlock *PRExecuteBB = BasicBlock::Create(
3911           Ctx, "worker_state_machine.parallel_region.execute", Kernel,
3912           StateMachineEndParallelBB);
3913       CallInst::Create(ParallelRegion, {ZeroArg, GTid}, "", PRExecuteBB)
3914           ->setDebugLoc(DLoc);
3915       BranchInst::Create(StateMachineEndParallelBB, PRExecuteBB)
3916           ->setDebugLoc(DLoc);
3917 
3918       BasicBlock *PRNextBB =
3919           BasicBlock::Create(Ctx, "worker_state_machine.parallel_region.check",
3920                              Kernel, StateMachineEndParallelBB);
3921 
3922       // Check if we need to compare the pointer at all or if we can just
3923       // call the parallel region function.
3924       Value *IsPR;
3925       if (I + 1 < E || !ReachedUnknownParallelRegions.empty()) {
3926         Instruction *CmpI = ICmpInst::Create(
3927             ICmpInst::ICmp, llvm::CmpInst::ICMP_EQ, WorkFnCast, ParallelRegion,
3928             "worker.check_parallel_region", StateMachineIfCascadeCurrentBB);
3929         CmpI->setDebugLoc(DLoc);
3930         IsPR = CmpI;
3931       } else {
3932         IsPR = ConstantInt::getTrue(Ctx);
3933       }
3934 
3935       BranchInst::Create(PRExecuteBB, PRNextBB, IsPR,
3936                          StateMachineIfCascadeCurrentBB)
3937           ->setDebugLoc(DLoc);
3938       StateMachineIfCascadeCurrentBB = PRNextBB;
3939     }
3940 
3941     // At the end of the if-cascade we place the indirect function pointer call
3942     // in case we might need it, that is if there can be parallel regions we
3943     // have not handled in the if-cascade above.
3944     if (!ReachedUnknownParallelRegions.empty()) {
3945       StateMachineIfCascadeCurrentBB->setName(
3946           "worker_state_machine.parallel_region.fallback.execute");
3947       CallInst::Create(ParallelRegionFnTy, WorkFnCast, {ZeroArg, GTid}, "",
3948                        StateMachineIfCascadeCurrentBB)
3949           ->setDebugLoc(DLoc);
3950     }
3951     BranchInst::Create(StateMachineEndParallelBB,
3952                        StateMachineIfCascadeCurrentBB)
3953         ->setDebugLoc(DLoc);
3954 
3955     FunctionCallee EndParallelFn =
3956         OMPInfoCache.OMPBuilder.getOrCreateRuntimeFunction(
3957             M, OMPRTL___kmpc_kernel_end_parallel);
3958     CallInst *EndParallel =
3959         CallInst::Create(EndParallelFn, {}, "", StateMachineEndParallelBB);
3960     OMPInfoCache.setCallingConvention(EndParallelFn, EndParallel);
3961     EndParallel->setDebugLoc(DLoc);
3962     BranchInst::Create(StateMachineDoneBarrierBB, StateMachineEndParallelBB)
3963         ->setDebugLoc(DLoc);
3964 
3965     CallInst::Create(BarrierFn, {Ident, GTid}, "", StateMachineDoneBarrierBB)
3966         ->setDebugLoc(DLoc);
3967     BranchInst::Create(StateMachineBeginBB, StateMachineDoneBarrierBB)
3968         ->setDebugLoc(DLoc);
3969 
3970     return ChangeStatus::CHANGED;
3971   }
3972 
3973   /// Fixpoint iteration update function. Will be called every time a dependence
3974   /// changed its state (and in the beginning).
3975   ChangeStatus updateImpl(Attributor &A) override {
3976     KernelInfoState StateBefore = getState();
3977 
3978     // Callback to check a read/write instruction.
3979     auto CheckRWInst = [&](Instruction &I) {
3980       // We handle calls later.
3981       if (isa<CallBase>(I))
3982         return true;
3983       // We only care about write effects.
3984       if (!I.mayWriteToMemory())
3985         return true;
3986       if (auto *SI = dyn_cast<StoreInst>(&I)) {
3987         SmallVector<const Value *> Objects;
3988         getUnderlyingObjects(SI->getPointerOperand(), Objects);
3989         if (llvm::all_of(Objects,
3990                          [](const Value *Obj) { return isa<AllocaInst>(Obj); }))
3991           return true;
3992         // Check for AAHeapToStack moved objects which must not be guarded.
3993         auto &HS = A.getAAFor<AAHeapToStack>(
3994             *this, IRPosition::function(*I.getFunction()),
3995             DepClassTy::OPTIONAL);
3996         if (llvm::all_of(Objects, [&HS](const Value *Obj) {
3997               auto *CB = dyn_cast<CallBase>(Obj);
3998               if (!CB)
3999                 return false;
4000               return HS.isAssumedHeapToStack(*CB);
4001             })) {
4002           return true;
4003         }
4004       }
4005 
4006       // Insert instruction that needs guarding.
4007       SPMDCompatibilityTracker.insert(&I);
4008       return true;
4009     };
4010 
4011     bool UsedAssumedInformationInCheckRWInst = false;
4012     if (!SPMDCompatibilityTracker.isAtFixpoint())
4013       if (!A.checkForAllReadWriteInstructions(
4014               CheckRWInst, *this, UsedAssumedInformationInCheckRWInst))
4015         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4016 
4017     bool UsedAssumedInformationFromReachingKernels = false;
4018     if (!IsKernelEntry) {
4019       updateParallelLevels(A);
4020 
4021       bool AllReachingKernelsKnown = true;
4022       updateReachingKernelEntries(A, AllReachingKernelsKnown);
4023       UsedAssumedInformationFromReachingKernels = !AllReachingKernelsKnown;
4024 
4025       if (!ParallelLevels.isValidState())
4026         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4027       else if (!ReachingKernelEntries.isValidState())
4028         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4029       else if (!SPMDCompatibilityTracker.empty()) {
4030         // Check if all reaching kernels agree on the mode as we can otherwise
4031         // not guard instructions. We might not be sure about the mode so we
4032         // we cannot fix the internal spmd-zation state either.
4033         int SPMD = 0, Generic = 0;
4034         for (auto *Kernel : ReachingKernelEntries) {
4035           auto &CBAA = A.getAAFor<AAKernelInfo>(
4036               *this, IRPosition::function(*Kernel), DepClassTy::OPTIONAL);
4037           if (CBAA.SPMDCompatibilityTracker.isValidState() &&
4038               CBAA.SPMDCompatibilityTracker.isAssumed())
4039             ++SPMD;
4040           else
4041             ++Generic;
4042           if (!CBAA.SPMDCompatibilityTracker.isAtFixpoint())
4043             UsedAssumedInformationFromReachingKernels = true;
4044         }
4045         if (SPMD != 0 && Generic != 0)
4046           SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4047       }
4048     }
4049 
4050     // Callback to check a call instruction.
4051     bool AllParallelRegionStatesWereFixed = true;
4052     bool AllSPMDStatesWereFixed = true;
4053     auto CheckCallInst = [&](Instruction &I) {
4054       auto &CB = cast<CallBase>(I);
4055       auto &CBAA = A.getAAFor<AAKernelInfo>(
4056           *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4057       getState() ^= CBAA.getState();
4058       AllSPMDStatesWereFixed &= CBAA.SPMDCompatibilityTracker.isAtFixpoint();
4059       AllParallelRegionStatesWereFixed &=
4060           CBAA.ReachedKnownParallelRegions.isAtFixpoint();
4061       AllParallelRegionStatesWereFixed &=
4062           CBAA.ReachedUnknownParallelRegions.isAtFixpoint();
4063       return true;
4064     };
4065 
4066     bool UsedAssumedInformationInCheckCallInst = false;
4067     if (!A.checkForAllCallLikeInstructions(
4068             CheckCallInst, *this, UsedAssumedInformationInCheckCallInst)) {
4069       LLVM_DEBUG(dbgs() << TAG
4070                         << "Failed to visit all call-like instructions!\n";);
4071       return indicatePessimisticFixpoint();
4072     }
4073 
4074     // If we haven't used any assumed information for the reached parallel
4075     // region states we can fix it.
4076     if (!UsedAssumedInformationInCheckCallInst &&
4077         AllParallelRegionStatesWereFixed) {
4078       ReachedKnownParallelRegions.indicateOptimisticFixpoint();
4079       ReachedUnknownParallelRegions.indicateOptimisticFixpoint();
4080     }
4081 
4082     // If we are sure there are no parallel regions in the kernel we do not
4083     // want SPMD mode.
4084     if (IsKernelEntry && ReachedUnknownParallelRegions.isAtFixpoint() &&
4085         ReachedKnownParallelRegions.isAtFixpoint() &&
4086         ReachedUnknownParallelRegions.isValidState() &&
4087         ReachedKnownParallelRegions.isValidState() &&
4088         !mayContainParallelRegion())
4089       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4090 
4091     // If we haven't used any assumed information for the SPMD state we can fix
4092     // it.
4093     if (!UsedAssumedInformationInCheckRWInst &&
4094         !UsedAssumedInformationInCheckCallInst &&
4095         !UsedAssumedInformationFromReachingKernels && AllSPMDStatesWereFixed)
4096       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4097 
4098     return StateBefore == getState() ? ChangeStatus::UNCHANGED
4099                                      : ChangeStatus::CHANGED;
4100   }
4101 
4102 private:
4103   /// Update info regarding reaching kernels.
4104   void updateReachingKernelEntries(Attributor &A,
4105                                    bool &AllReachingKernelsKnown) {
4106     auto PredCallSite = [&](AbstractCallSite ACS) {
4107       Function *Caller = ACS.getInstruction()->getFunction();
4108 
4109       assert(Caller && "Caller is nullptr");
4110 
4111       auto &CAA = A.getOrCreateAAFor<AAKernelInfo>(
4112           IRPosition::function(*Caller), this, DepClassTy::REQUIRED);
4113       if (CAA.ReachingKernelEntries.isValidState()) {
4114         ReachingKernelEntries ^= CAA.ReachingKernelEntries;
4115         return true;
4116       }
4117 
4118       // We lost track of the caller of the associated function, any kernel
4119       // could reach now.
4120       ReachingKernelEntries.indicatePessimisticFixpoint();
4121 
4122       return true;
4123     };
4124 
4125     if (!A.checkForAllCallSites(PredCallSite, *this,
4126                                 true /* RequireAllCallSites */,
4127                                 AllReachingKernelsKnown))
4128       ReachingKernelEntries.indicatePessimisticFixpoint();
4129   }
4130 
4131   /// Update info regarding parallel levels.
4132   void updateParallelLevels(Attributor &A) {
4133     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4134     OMPInformationCache::RuntimeFunctionInfo &Parallel51RFI =
4135         OMPInfoCache.RFIs[OMPRTL___kmpc_parallel_51];
4136 
4137     auto PredCallSite = [&](AbstractCallSite ACS) {
4138       Function *Caller = ACS.getInstruction()->getFunction();
4139 
4140       assert(Caller && "Caller is nullptr");
4141 
4142       auto &CAA =
4143           A.getOrCreateAAFor<AAKernelInfo>(IRPosition::function(*Caller));
4144       if (CAA.ParallelLevels.isValidState()) {
4145         // Any function that is called by `__kmpc_parallel_51` will not be
4146         // folded as the parallel level in the function is updated. In order to
4147         // get it right, all the analysis would depend on the implentation. That
4148         // said, if in the future any change to the implementation, the analysis
4149         // could be wrong. As a consequence, we are just conservative here.
4150         if (Caller == Parallel51RFI.Declaration) {
4151           ParallelLevels.indicatePessimisticFixpoint();
4152           return true;
4153         }
4154 
4155         ParallelLevels ^= CAA.ParallelLevels;
4156 
4157         return true;
4158       }
4159 
4160       // We lost track of the caller of the associated function, any kernel
4161       // could reach now.
4162       ParallelLevels.indicatePessimisticFixpoint();
4163 
4164       return true;
4165     };
4166 
4167     bool AllCallSitesKnown = true;
4168     if (!A.checkForAllCallSites(PredCallSite, *this,
4169                                 true /* RequireAllCallSites */,
4170                                 AllCallSitesKnown))
4171       ParallelLevels.indicatePessimisticFixpoint();
4172   }
4173 };
4174 
4175 /// The call site kernel info abstract attribute, basically, what can we say
4176 /// about a call site with regards to the KernelInfoState. For now this simply
4177 /// forwards the information from the callee.
4178 struct AAKernelInfoCallSite : AAKernelInfo {
4179   AAKernelInfoCallSite(const IRPosition &IRP, Attributor &A)
4180       : AAKernelInfo(IRP, A) {}
4181 
4182   /// See AbstractAttribute::initialize(...).
4183   void initialize(Attributor &A) override {
4184     AAKernelInfo::initialize(A);
4185 
4186     CallBase &CB = cast<CallBase>(getAssociatedValue());
4187     Function *Callee = getAssociatedFunction();
4188 
4189     auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
4190         *this, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
4191 
4192     // Check for SPMD-mode assumptions.
4193     if (AssumptionAA.hasAssumption("ompx_spmd_amenable")) {
4194       SPMDCompatibilityTracker.indicateOptimisticFixpoint();
4195       indicateOptimisticFixpoint();
4196     }
4197 
4198     // First weed out calls we do not care about, that is readonly/readnone
4199     // calls, intrinsics, and "no_openmp" calls. Neither of these can reach a
4200     // parallel region or anything else we are looking for.
4201     if (!CB.mayWriteToMemory() || isa<IntrinsicInst>(CB)) {
4202       indicateOptimisticFixpoint();
4203       return;
4204     }
4205 
4206     // Next we check if we know the callee. If it is a known OpenMP function
4207     // we will handle them explicitly in the switch below. If it is not, we
4208     // will use an AAKernelInfo object on the callee to gather information and
4209     // merge that into the current state. The latter happens in the updateImpl.
4210     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4211     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4212     if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4213       // Unknown caller or declarations are not analyzable, we give up.
4214       if (!Callee || !A.isFunctionIPOAmendable(*Callee)) {
4215 
4216         // Unknown callees might contain parallel regions, except if they have
4217         // an appropriate assumption attached.
4218         if (!(AssumptionAA.hasAssumption("omp_no_openmp") ||
4219               AssumptionAA.hasAssumption("omp_no_parallelism")))
4220           ReachedUnknownParallelRegions.insert(&CB);
4221 
4222         // If SPMDCompatibilityTracker is not fixed, we need to give up on the
4223         // idea we can run something unknown in SPMD-mode.
4224         if (!SPMDCompatibilityTracker.isAtFixpoint()) {
4225           SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4226           SPMDCompatibilityTracker.insert(&CB);
4227         }
4228 
4229         // We have updated the state for this unknown call properly, there won't
4230         // be any change so we indicate a fixpoint.
4231         indicateOptimisticFixpoint();
4232       }
4233       // If the callee is known and can be used in IPO, we will update the state
4234       // based on the callee state in updateImpl.
4235       return;
4236     }
4237 
4238     const unsigned int WrapperFunctionArgNo = 6;
4239     RuntimeFunction RF = It->getSecond();
4240     switch (RF) {
4241     // All the functions we know are compatible with SPMD mode.
4242     case OMPRTL___kmpc_is_spmd_exec_mode:
4243     case OMPRTL___kmpc_distribute_static_fini:
4244     case OMPRTL___kmpc_for_static_fini:
4245     case OMPRTL___kmpc_global_thread_num:
4246     case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4247     case OMPRTL___kmpc_get_hardware_num_blocks:
4248     case OMPRTL___kmpc_single:
4249     case OMPRTL___kmpc_end_single:
4250     case OMPRTL___kmpc_master:
4251     case OMPRTL___kmpc_end_master:
4252     case OMPRTL___kmpc_barrier:
4253     case OMPRTL___kmpc_nvptx_parallel_reduce_nowait_v2:
4254     case OMPRTL___kmpc_nvptx_teams_reduce_nowait_v2:
4255     case OMPRTL___kmpc_nvptx_end_reduce_nowait:
4256       break;
4257     case OMPRTL___kmpc_distribute_static_init_4:
4258     case OMPRTL___kmpc_distribute_static_init_4u:
4259     case OMPRTL___kmpc_distribute_static_init_8:
4260     case OMPRTL___kmpc_distribute_static_init_8u:
4261     case OMPRTL___kmpc_for_static_init_4:
4262     case OMPRTL___kmpc_for_static_init_4u:
4263     case OMPRTL___kmpc_for_static_init_8:
4264     case OMPRTL___kmpc_for_static_init_8u: {
4265       // Check the schedule and allow static schedule in SPMD mode.
4266       unsigned ScheduleArgOpNo = 2;
4267       auto *ScheduleTypeCI =
4268           dyn_cast<ConstantInt>(CB.getArgOperand(ScheduleArgOpNo));
4269       unsigned ScheduleTypeVal =
4270           ScheduleTypeCI ? ScheduleTypeCI->getZExtValue() : 0;
4271       switch (OMPScheduleType(ScheduleTypeVal)) {
4272       case OMPScheduleType::Static:
4273       case OMPScheduleType::StaticChunked:
4274       case OMPScheduleType::Distribute:
4275       case OMPScheduleType::DistributeChunked:
4276         break;
4277       default:
4278         SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4279         SPMDCompatibilityTracker.insert(&CB);
4280         break;
4281       };
4282     } break;
4283     case OMPRTL___kmpc_target_init:
4284       KernelInitCB = &CB;
4285       break;
4286     case OMPRTL___kmpc_target_deinit:
4287       KernelDeinitCB = &CB;
4288       break;
4289     case OMPRTL___kmpc_parallel_51:
4290       if (auto *ParallelRegion = dyn_cast<Function>(
4291               CB.getArgOperand(WrapperFunctionArgNo)->stripPointerCasts())) {
4292         ReachedKnownParallelRegions.insert(ParallelRegion);
4293         break;
4294       }
4295       // The condition above should usually get the parallel region function
4296       // pointer and record it. In the off chance it doesn't we assume the
4297       // worst.
4298       ReachedUnknownParallelRegions.insert(&CB);
4299       break;
4300     case OMPRTL___kmpc_omp_task:
4301       // We do not look into tasks right now, just give up.
4302       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4303       SPMDCompatibilityTracker.insert(&CB);
4304       ReachedUnknownParallelRegions.insert(&CB);
4305       break;
4306     case OMPRTL___kmpc_alloc_shared:
4307     case OMPRTL___kmpc_free_shared:
4308       // Return without setting a fixpoint, to be resolved in updateImpl.
4309       return;
4310     default:
4311       // Unknown OpenMP runtime calls cannot be executed in SPMD-mode,
4312       // generally. However, they do not hide parallel regions.
4313       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4314       SPMDCompatibilityTracker.insert(&CB);
4315       break;
4316     }
4317     // All other OpenMP runtime calls will not reach parallel regions so they
4318     // can be safely ignored for now. Since it is a known OpenMP runtime call we
4319     // have now modeled all effects and there is no need for any update.
4320     indicateOptimisticFixpoint();
4321   }
4322 
4323   ChangeStatus updateImpl(Attributor &A) override {
4324     // TODO: Once we have call site specific value information we can provide
4325     //       call site specific liveness information and then it makes
4326     //       sense to specialize attributes for call sites arguments instead of
4327     //       redirecting requests to the callee argument.
4328     Function *F = getAssociatedFunction();
4329 
4330     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4331     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(F);
4332 
4333     // If F is not a runtime function, propagate the AAKernelInfo of the callee.
4334     if (It == OMPInfoCache.RuntimeFunctionIDMap.end()) {
4335       const IRPosition &FnPos = IRPosition::function(*F);
4336       auto &FnAA = A.getAAFor<AAKernelInfo>(*this, FnPos, DepClassTy::REQUIRED);
4337       if (getState() == FnAA.getState())
4338         return ChangeStatus::UNCHANGED;
4339       getState() = FnAA.getState();
4340       return ChangeStatus::CHANGED;
4341     }
4342 
4343     // F is a runtime function that allocates or frees memory, check
4344     // AAHeapToStack and AAHeapToShared.
4345     KernelInfoState StateBefore = getState();
4346     assert((It->getSecond() == OMPRTL___kmpc_alloc_shared ||
4347             It->getSecond() == OMPRTL___kmpc_free_shared) &&
4348            "Expected a __kmpc_alloc_shared or __kmpc_free_shared runtime call");
4349 
4350     CallBase &CB = cast<CallBase>(getAssociatedValue());
4351 
4352     auto &HeapToStackAA = A.getAAFor<AAHeapToStack>(
4353         *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4354     auto &HeapToSharedAA = A.getAAFor<AAHeapToShared>(
4355         *this, IRPosition::function(*CB.getCaller()), DepClassTy::OPTIONAL);
4356 
4357     RuntimeFunction RF = It->getSecond();
4358 
4359     switch (RF) {
4360     // If neither HeapToStack nor HeapToShared assume the call is removed,
4361     // assume SPMD incompatibility.
4362     case OMPRTL___kmpc_alloc_shared:
4363       if (!HeapToStackAA.isAssumedHeapToStack(CB) &&
4364           !HeapToSharedAA.isAssumedHeapToShared(CB))
4365         SPMDCompatibilityTracker.insert(&CB);
4366       break;
4367     case OMPRTL___kmpc_free_shared:
4368       if (!HeapToStackAA.isAssumedHeapToStackRemovedFree(CB) &&
4369           !HeapToSharedAA.isAssumedHeapToSharedRemovedFree(CB))
4370         SPMDCompatibilityTracker.insert(&CB);
4371       break;
4372     default:
4373       SPMDCompatibilityTracker.indicatePessimisticFixpoint();
4374       SPMDCompatibilityTracker.insert(&CB);
4375     }
4376 
4377     return StateBefore == getState() ? ChangeStatus::UNCHANGED
4378                                      : ChangeStatus::CHANGED;
4379   }
4380 };
4381 
4382 struct AAFoldRuntimeCall
4383     : public StateWrapper<BooleanState, AbstractAttribute> {
4384   using Base = StateWrapper<BooleanState, AbstractAttribute>;
4385 
4386   AAFoldRuntimeCall(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
4387 
4388   /// Statistics are tracked as part of manifest for now.
4389   void trackStatistics() const override {}
4390 
4391   /// Create an abstract attribute biew for the position \p IRP.
4392   static AAFoldRuntimeCall &createForPosition(const IRPosition &IRP,
4393                                               Attributor &A);
4394 
4395   /// See AbstractAttribute::getName()
4396   const std::string getName() const override { return "AAFoldRuntimeCall"; }
4397 
4398   /// See AbstractAttribute::getIdAddr()
4399   const char *getIdAddr() const override { return &ID; }
4400 
4401   /// This function should return true if the type of the \p AA is
4402   /// AAFoldRuntimeCall
4403   static bool classof(const AbstractAttribute *AA) {
4404     return (AA->getIdAddr() == &ID);
4405   }
4406 
4407   static const char ID;
4408 };
4409 
4410 struct AAFoldRuntimeCallCallSiteReturned : AAFoldRuntimeCall {
4411   AAFoldRuntimeCallCallSiteReturned(const IRPosition &IRP, Attributor &A)
4412       : AAFoldRuntimeCall(IRP, A) {}
4413 
4414   /// See AbstractAttribute::getAsStr()
4415   const std::string getAsStr() const override {
4416     if (!isValidState())
4417       return "<invalid>";
4418 
4419     std::string Str("simplified value: ");
4420 
4421     if (!SimplifiedValue.hasValue())
4422       return Str + std::string("none");
4423 
4424     if (!SimplifiedValue.getValue())
4425       return Str + std::string("nullptr");
4426 
4427     if (ConstantInt *CI = dyn_cast<ConstantInt>(SimplifiedValue.getValue()))
4428       return Str + std::to_string(CI->getSExtValue());
4429 
4430     return Str + std::string("unknown");
4431   }
4432 
4433   void initialize(Attributor &A) override {
4434     if (DisableOpenMPOptFolding)
4435       indicatePessimisticFixpoint();
4436 
4437     Function *Callee = getAssociatedFunction();
4438 
4439     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
4440     const auto &It = OMPInfoCache.RuntimeFunctionIDMap.find(Callee);
4441     assert(It != OMPInfoCache.RuntimeFunctionIDMap.end() &&
4442            "Expected a known OpenMP runtime function");
4443 
4444     RFKind = It->getSecond();
4445 
4446     CallBase &CB = cast<CallBase>(getAssociatedValue());
4447     A.registerSimplificationCallback(
4448         IRPosition::callsite_returned(CB),
4449         [&](const IRPosition &IRP, const AbstractAttribute *AA,
4450             bool &UsedAssumedInformation) -> Optional<Value *> {
4451           assert((isValidState() || (SimplifiedValue.hasValue() &&
4452                                      SimplifiedValue.getValue() == nullptr)) &&
4453                  "Unexpected invalid state!");
4454 
4455           if (!isAtFixpoint()) {
4456             UsedAssumedInformation = true;
4457             if (AA)
4458               A.recordDependence(*this, *AA, DepClassTy::OPTIONAL);
4459           }
4460           return SimplifiedValue;
4461         });
4462   }
4463 
4464   ChangeStatus updateImpl(Attributor &A) override {
4465     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4466     switch (RFKind) {
4467     case OMPRTL___kmpc_is_spmd_exec_mode:
4468       Changed |= foldIsSPMDExecMode(A);
4469       break;
4470     case OMPRTL___kmpc_is_generic_main_thread_id:
4471       Changed |= foldIsGenericMainThread(A);
4472       break;
4473     case OMPRTL___kmpc_parallel_level:
4474       Changed |= foldParallelLevel(A);
4475       break;
4476     case OMPRTL___kmpc_get_hardware_num_threads_in_block:
4477       Changed = Changed | foldKernelFnAttribute(A, "omp_target_thread_limit");
4478       break;
4479     case OMPRTL___kmpc_get_hardware_num_blocks:
4480       Changed = Changed | foldKernelFnAttribute(A, "omp_target_num_teams");
4481       break;
4482     default:
4483       llvm_unreachable("Unhandled OpenMP runtime function!");
4484     }
4485 
4486     return Changed;
4487   }
4488 
4489   ChangeStatus manifest(Attributor &A) override {
4490     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4491 
4492     if (SimplifiedValue.hasValue() && SimplifiedValue.getValue()) {
4493       Instruction &I = *getCtxI();
4494       A.changeValueAfterManifest(I, **SimplifiedValue);
4495       A.deleteAfterManifest(I);
4496 
4497       CallBase *CB = dyn_cast<CallBase>(&I);
4498       auto Remark = [&](OptimizationRemark OR) {
4499         if (auto *C = dyn_cast<ConstantInt>(*SimplifiedValue))
4500           return OR << "Replacing OpenMP runtime call "
4501                     << CB->getCalledFunction()->getName() << " with "
4502                     << ore::NV("FoldedValue", C->getZExtValue()) << ".";
4503         return OR << "Replacing OpenMP runtime call "
4504                   << CB->getCalledFunction()->getName() << ".";
4505       };
4506 
4507       if (CB && EnableVerboseRemarks)
4508         A.emitRemark<OptimizationRemark>(CB, "OMP180", Remark);
4509 
4510       LLVM_DEBUG(dbgs() << TAG << "Replacing runtime call: " << I << " with "
4511                         << **SimplifiedValue << "\n");
4512 
4513       Changed = ChangeStatus::CHANGED;
4514     }
4515 
4516     return Changed;
4517   }
4518 
4519   ChangeStatus indicatePessimisticFixpoint() override {
4520     SimplifiedValue = nullptr;
4521     return AAFoldRuntimeCall::indicatePessimisticFixpoint();
4522   }
4523 
4524 private:
4525   /// Fold __kmpc_is_spmd_exec_mode into a constant if possible.
4526   ChangeStatus foldIsSPMDExecMode(Attributor &A) {
4527     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4528 
4529     unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
4530     unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
4531     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4532         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4533 
4534     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4535       return indicatePessimisticFixpoint();
4536 
4537     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4538       auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
4539                                           DepClassTy::REQUIRED);
4540 
4541       if (!AA.isValidState()) {
4542         SimplifiedValue = nullptr;
4543         return indicatePessimisticFixpoint();
4544       }
4545 
4546       if (AA.SPMDCompatibilityTracker.isAssumed()) {
4547         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4548           ++KnownSPMDCount;
4549         else
4550           ++AssumedSPMDCount;
4551       } else {
4552         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4553           ++KnownNonSPMDCount;
4554         else
4555           ++AssumedNonSPMDCount;
4556       }
4557     }
4558 
4559     if ((AssumedSPMDCount + KnownSPMDCount) &&
4560         (AssumedNonSPMDCount + KnownNonSPMDCount))
4561       return indicatePessimisticFixpoint();
4562 
4563     auto &Ctx = getAnchorValue().getContext();
4564     if (KnownSPMDCount || AssumedSPMDCount) {
4565       assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
4566              "Expected only SPMD kernels!");
4567       // All reaching kernels are in SPMD mode. Update all function calls to
4568       // __kmpc_is_spmd_exec_mode to 1.
4569       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
4570     } else if (KnownNonSPMDCount || AssumedNonSPMDCount) {
4571       assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
4572              "Expected only non-SPMD kernels!");
4573       // All reaching kernels are in non-SPMD mode. Update all function
4574       // calls to __kmpc_is_spmd_exec_mode to 0.
4575       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), false);
4576     } else {
4577       // We have empty reaching kernels, therefore we cannot tell if the
4578       // associated call site can be folded. At this moment, SimplifiedValue
4579       // must be none.
4580       assert(!SimplifiedValue.hasValue() && "SimplifiedValue should be none");
4581     }
4582 
4583     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4584                                                     : ChangeStatus::CHANGED;
4585   }
4586 
4587   /// Fold __kmpc_is_generic_main_thread_id into a constant if possible.
4588   ChangeStatus foldIsGenericMainThread(Attributor &A) {
4589     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4590 
4591     CallBase &CB = cast<CallBase>(getAssociatedValue());
4592     Function *F = CB.getFunction();
4593     const auto &ExecutionDomainAA = A.getAAFor<AAExecutionDomain>(
4594         *this, IRPosition::function(*F), DepClassTy::REQUIRED);
4595 
4596     if (!ExecutionDomainAA.isValidState())
4597       return indicatePessimisticFixpoint();
4598 
4599     auto &Ctx = getAnchorValue().getContext();
4600     if (ExecutionDomainAA.isExecutedByInitialThreadOnly(CB))
4601       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), true);
4602     else
4603       return indicatePessimisticFixpoint();
4604 
4605     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4606                                                     : ChangeStatus::CHANGED;
4607   }
4608 
4609   /// Fold __kmpc_parallel_level into a constant if possible.
4610   ChangeStatus foldParallelLevel(Attributor &A) {
4611     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4612 
4613     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4614         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4615 
4616     if (!CallerKernelInfoAA.ParallelLevels.isValidState())
4617       return indicatePessimisticFixpoint();
4618 
4619     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4620       return indicatePessimisticFixpoint();
4621 
4622     if (CallerKernelInfoAA.ReachingKernelEntries.empty()) {
4623       assert(!SimplifiedValue.hasValue() &&
4624              "SimplifiedValue should keep none at this point");
4625       return ChangeStatus::UNCHANGED;
4626     }
4627 
4628     unsigned AssumedSPMDCount = 0, KnownSPMDCount = 0;
4629     unsigned AssumedNonSPMDCount = 0, KnownNonSPMDCount = 0;
4630     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4631       auto &AA = A.getAAFor<AAKernelInfo>(*this, IRPosition::function(*K),
4632                                           DepClassTy::REQUIRED);
4633       if (!AA.SPMDCompatibilityTracker.isValidState())
4634         return indicatePessimisticFixpoint();
4635 
4636       if (AA.SPMDCompatibilityTracker.isAssumed()) {
4637         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4638           ++KnownSPMDCount;
4639         else
4640           ++AssumedSPMDCount;
4641       } else {
4642         if (AA.SPMDCompatibilityTracker.isAtFixpoint())
4643           ++KnownNonSPMDCount;
4644         else
4645           ++AssumedNonSPMDCount;
4646       }
4647     }
4648 
4649     if ((AssumedSPMDCount + KnownSPMDCount) &&
4650         (AssumedNonSPMDCount + KnownNonSPMDCount))
4651       return indicatePessimisticFixpoint();
4652 
4653     auto &Ctx = getAnchorValue().getContext();
4654     // If the caller can only be reached by SPMD kernel entries, the parallel
4655     // level is 1. Similarly, if the caller can only be reached by non-SPMD
4656     // kernel entries, it is 0.
4657     if (AssumedSPMDCount || KnownSPMDCount) {
4658       assert(KnownNonSPMDCount == 0 && AssumedNonSPMDCount == 0 &&
4659              "Expected only SPMD kernels!");
4660       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 1);
4661     } else {
4662       assert(KnownSPMDCount == 0 && AssumedSPMDCount == 0 &&
4663              "Expected only non-SPMD kernels!");
4664       SimplifiedValue = ConstantInt::get(Type::getInt8Ty(Ctx), 0);
4665     }
4666     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4667                                                     : ChangeStatus::CHANGED;
4668   }
4669 
4670   ChangeStatus foldKernelFnAttribute(Attributor &A, llvm::StringRef Attr) {
4671     // Specialize only if all the calls agree with the attribute constant value
4672     int32_t CurrentAttrValue = -1;
4673     Optional<Value *> SimplifiedValueBefore = SimplifiedValue;
4674 
4675     auto &CallerKernelInfoAA = A.getAAFor<AAKernelInfo>(
4676         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
4677 
4678     if (!CallerKernelInfoAA.ReachingKernelEntries.isValidState())
4679       return indicatePessimisticFixpoint();
4680 
4681     // Iterate over the kernels that reach this function
4682     for (Kernel K : CallerKernelInfoAA.ReachingKernelEntries) {
4683       int32_t NextAttrVal = -1;
4684       if (K->hasFnAttribute(Attr))
4685         NextAttrVal =
4686             std::stoi(K->getFnAttribute(Attr).getValueAsString().str());
4687 
4688       if (NextAttrVal == -1 ||
4689           (CurrentAttrValue != -1 && CurrentAttrValue != NextAttrVal))
4690         return indicatePessimisticFixpoint();
4691       CurrentAttrValue = NextAttrVal;
4692     }
4693 
4694     if (CurrentAttrValue != -1) {
4695       auto &Ctx = getAnchorValue().getContext();
4696       SimplifiedValue =
4697           ConstantInt::get(Type::getInt32Ty(Ctx), CurrentAttrValue);
4698     }
4699     return SimplifiedValue == SimplifiedValueBefore ? ChangeStatus::UNCHANGED
4700                                                     : ChangeStatus::CHANGED;
4701   }
4702 
4703   /// An optional value the associated value is assumed to fold to. That is, we
4704   /// assume the associated value (which is a call) can be replaced by this
4705   /// simplified value.
4706   Optional<Value *> SimplifiedValue;
4707 
4708   /// The runtime function kind of the callee of the associated call site.
4709   RuntimeFunction RFKind;
4710 };
4711 
4712 } // namespace
4713 
4714 /// Register folding callsite
4715 void OpenMPOpt::registerFoldRuntimeCall(RuntimeFunction RF) {
4716   auto &RFI = OMPInfoCache.RFIs[RF];
4717   RFI.foreachUse(SCC, [&](Use &U, Function &F) {
4718     CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &RFI);
4719     if (!CI)
4720       return false;
4721     A.getOrCreateAAFor<AAFoldRuntimeCall>(
4722         IRPosition::callsite_returned(*CI), /* QueryingAA */ nullptr,
4723         DepClassTy::NONE, /* ForceUpdate */ false,
4724         /* UpdateAfterInit */ false);
4725     return false;
4726   });
4727 }
4728 
4729 void OpenMPOpt::registerAAs(bool IsModulePass) {
4730   if (SCC.empty())
4731 
4732     return;
4733   if (IsModulePass) {
4734     // Ensure we create the AAKernelInfo AAs first and without triggering an
4735     // update. This will make sure we register all value simplification
4736     // callbacks before any other AA has the chance to create an AAValueSimplify
4737     // or similar.
4738     for (Function *Kernel : OMPInfoCache.Kernels)
4739       A.getOrCreateAAFor<AAKernelInfo>(
4740           IRPosition::function(*Kernel), /* QueryingAA */ nullptr,
4741           DepClassTy::NONE, /* ForceUpdate */ false,
4742           /* UpdateAfterInit */ false);
4743 
4744     registerFoldRuntimeCall(OMPRTL___kmpc_is_generic_main_thread_id);
4745     registerFoldRuntimeCall(OMPRTL___kmpc_is_spmd_exec_mode);
4746     registerFoldRuntimeCall(OMPRTL___kmpc_parallel_level);
4747     registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_threads_in_block);
4748     registerFoldRuntimeCall(OMPRTL___kmpc_get_hardware_num_blocks);
4749   }
4750 
4751   // Create CallSite AA for all Getters.
4752   for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
4753     auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];
4754 
4755     auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];
4756 
4757     auto CreateAA = [&](Use &U, Function &Caller) {
4758       CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
4759       if (!CI)
4760         return false;
4761 
4762       auto &CB = cast<CallBase>(*CI);
4763 
4764       IRPosition CBPos = IRPosition::callsite_function(CB);
4765       A.getOrCreateAAFor<AAICVTracker>(CBPos);
4766       return false;
4767     };
4768 
4769     GetterRFI.foreachUse(SCC, CreateAA);
4770   }
4771   auto &GlobalizationRFI = OMPInfoCache.RFIs[OMPRTL___kmpc_alloc_shared];
4772   auto CreateAA = [&](Use &U, Function &F) {
4773     A.getOrCreateAAFor<AAHeapToShared>(IRPosition::function(F));
4774     return false;
4775   };
4776   if (!DisableOpenMPOptDeglobalization)
4777     GlobalizationRFI.foreachUse(SCC, CreateAA);
4778 
4779   // Create an ExecutionDomain AA for every function and a HeapToStack AA for
4780   // every function if there is a device kernel.
4781   if (!isOpenMPDevice(M))
4782     return;
4783 
4784   for (auto *F : SCC) {
4785     if (F->isDeclaration())
4786       continue;
4787 
4788     A.getOrCreateAAFor<AAExecutionDomain>(IRPosition::function(*F));
4789     if (!DisableOpenMPOptDeglobalization)
4790       A.getOrCreateAAFor<AAHeapToStack>(IRPosition::function(*F));
4791 
4792     for (auto &I : instructions(*F)) {
4793       if (auto *LI = dyn_cast<LoadInst>(&I)) {
4794         bool UsedAssumedInformation = false;
4795         A.getAssumedSimplified(IRPosition::value(*LI), /* AA */ nullptr,
4796                                UsedAssumedInformation);
4797       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
4798         A.getOrCreateAAFor<AAIsDead>(IRPosition::value(*SI));
4799       }
4800     }
4801   }
4802 }
4803 
4804 const char AAICVTracker::ID = 0;
4805 const char AAKernelInfo::ID = 0;
4806 const char AAExecutionDomain::ID = 0;
4807 const char AAHeapToShared::ID = 0;
4808 const char AAFoldRuntimeCall::ID = 0;
4809 
4810 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
4811                                               Attributor &A) {
4812   AAICVTracker *AA = nullptr;
4813   switch (IRP.getPositionKind()) {
4814   case IRPosition::IRP_INVALID:
4815   case IRPosition::IRP_FLOAT:
4816   case IRPosition::IRP_ARGUMENT:
4817   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4818     llvm_unreachable("ICVTracker can only be created for function position!");
4819   case IRPosition::IRP_RETURNED:
4820     AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
4821     break;
4822   case IRPosition::IRP_CALL_SITE_RETURNED:
4823     AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
4824     break;
4825   case IRPosition::IRP_CALL_SITE:
4826     AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
4827     break;
4828   case IRPosition::IRP_FUNCTION:
4829     AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
4830     break;
4831   }
4832 
4833   return *AA;
4834 }
4835 
4836 AAExecutionDomain &AAExecutionDomain::createForPosition(const IRPosition &IRP,
4837                                                         Attributor &A) {
4838   AAExecutionDomainFunction *AA = nullptr;
4839   switch (IRP.getPositionKind()) {
4840   case IRPosition::IRP_INVALID:
4841   case IRPosition::IRP_FLOAT:
4842   case IRPosition::IRP_ARGUMENT:
4843   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4844   case IRPosition::IRP_RETURNED:
4845   case IRPosition::IRP_CALL_SITE_RETURNED:
4846   case IRPosition::IRP_CALL_SITE:
4847     llvm_unreachable(
4848         "AAExecutionDomain can only be created for function position!");
4849   case IRPosition::IRP_FUNCTION:
4850     AA = new (A.Allocator) AAExecutionDomainFunction(IRP, A);
4851     break;
4852   }
4853 
4854   return *AA;
4855 }
4856 
4857 AAHeapToShared &AAHeapToShared::createForPosition(const IRPosition &IRP,
4858                                                   Attributor &A) {
4859   AAHeapToSharedFunction *AA = nullptr;
4860   switch (IRP.getPositionKind()) {
4861   case IRPosition::IRP_INVALID:
4862   case IRPosition::IRP_FLOAT:
4863   case IRPosition::IRP_ARGUMENT:
4864   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4865   case IRPosition::IRP_RETURNED:
4866   case IRPosition::IRP_CALL_SITE_RETURNED:
4867   case IRPosition::IRP_CALL_SITE:
4868     llvm_unreachable(
4869         "AAHeapToShared can only be created for function position!");
4870   case IRPosition::IRP_FUNCTION:
4871     AA = new (A.Allocator) AAHeapToSharedFunction(IRP, A);
4872     break;
4873   }
4874 
4875   return *AA;
4876 }
4877 
4878 AAKernelInfo &AAKernelInfo::createForPosition(const IRPosition &IRP,
4879                                               Attributor &A) {
4880   AAKernelInfo *AA = nullptr;
4881   switch (IRP.getPositionKind()) {
4882   case IRPosition::IRP_INVALID:
4883   case IRPosition::IRP_FLOAT:
4884   case IRPosition::IRP_ARGUMENT:
4885   case IRPosition::IRP_RETURNED:
4886   case IRPosition::IRP_CALL_SITE_RETURNED:
4887   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4888     llvm_unreachable("KernelInfo can only be created for function position!");
4889   case IRPosition::IRP_CALL_SITE:
4890     AA = new (A.Allocator) AAKernelInfoCallSite(IRP, A);
4891     break;
4892   case IRPosition::IRP_FUNCTION:
4893     AA = new (A.Allocator) AAKernelInfoFunction(IRP, A);
4894     break;
4895   }
4896 
4897   return *AA;
4898 }
4899 
4900 AAFoldRuntimeCall &AAFoldRuntimeCall::createForPosition(const IRPosition &IRP,
4901                                                         Attributor &A) {
4902   AAFoldRuntimeCall *AA = nullptr;
4903   switch (IRP.getPositionKind()) {
4904   case IRPosition::IRP_INVALID:
4905   case IRPosition::IRP_FLOAT:
4906   case IRPosition::IRP_ARGUMENT:
4907   case IRPosition::IRP_RETURNED:
4908   case IRPosition::IRP_FUNCTION:
4909   case IRPosition::IRP_CALL_SITE:
4910   case IRPosition::IRP_CALL_SITE_ARGUMENT:
4911     llvm_unreachable("KernelInfo can only be created for call site position!");
4912   case IRPosition::IRP_CALL_SITE_RETURNED:
4913     AA = new (A.Allocator) AAFoldRuntimeCallCallSiteReturned(IRP, A);
4914     break;
4915   }
4916 
4917   return *AA;
4918 }
4919 
4920 PreservedAnalyses OpenMPOptPass::run(Module &M, ModuleAnalysisManager &AM) {
4921   if (!containsOpenMP(M))
4922     return PreservedAnalyses::all();
4923   if (DisableOpenMPOptimizations)
4924     return PreservedAnalyses::all();
4925 
4926   FunctionAnalysisManager &FAM =
4927       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4928   KernelSet Kernels = getDeviceKernels(M);
4929 
4930   if (PrintModuleBeforeOptimizations)
4931     LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt Module Pass:\n" << M);
4932 
4933   auto IsCalled = [&](Function &F) {
4934     if (Kernels.contains(&F))
4935       return true;
4936     for (const User *U : F.users())
4937       if (!isa<BlockAddress>(U))
4938         return true;
4939     return false;
4940   };
4941 
4942   auto EmitRemark = [&](Function &F) {
4943     auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4944     ORE.emit([&]() {
4945       OptimizationRemarkAnalysis ORA(DEBUG_TYPE, "OMP140", &F);
4946       return ORA << "Could not internalize function. "
4947                  << "Some optimizations may not be possible. [OMP140]";
4948     });
4949   };
4950 
4951   // Create internal copies of each function if this is a kernel Module. This
4952   // allows iterprocedural passes to see every call edge.
4953   DenseMap<Function *, Function *> InternalizedMap;
4954   if (isOpenMPDevice(M)) {
4955     SmallPtrSet<Function *, 16> InternalizeFns;
4956     for (Function &F : M)
4957       if (!F.isDeclaration() && !Kernels.contains(&F) && IsCalled(F) &&
4958           !DisableInternalization) {
4959         if (Attributor::isInternalizable(F)) {
4960           InternalizeFns.insert(&F);
4961         } else if (!F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::Cold)) {
4962           EmitRemark(F);
4963         }
4964       }
4965 
4966     Attributor::internalizeFunctions(InternalizeFns, InternalizedMap);
4967   }
4968 
4969   // Look at every function in the Module unless it was internalized.
4970   SmallVector<Function *, 16> SCC;
4971   for (Function &F : M)
4972     if (!F.isDeclaration() && !InternalizedMap.lookup(&F))
4973       SCC.push_back(&F);
4974 
4975   if (SCC.empty())
4976     return PreservedAnalyses::all();
4977 
4978   AnalysisGetter AG(FAM);
4979 
4980   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
4981     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
4982   };
4983 
4984   BumpPtrAllocator Allocator;
4985   CallGraphUpdater CGUpdater;
4986 
4987   SetVector<Function *> Functions(SCC.begin(), SCC.end());
4988   OMPInformationCache InfoCache(M, AG, Allocator, /*CGSCC*/ Functions, Kernels);
4989 
4990   unsigned MaxFixpointIterations =
4991       (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
4992   Attributor A(Functions, InfoCache, CGUpdater, nullptr, true, false,
4993                MaxFixpointIterations, OREGetter, DEBUG_TYPE);
4994 
4995   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
4996   bool Changed = OMPOpt.run(true);
4997 
4998   // Optionally inline device functions for potentially better performance.
4999   if (AlwaysInlineDeviceFunctions && isOpenMPDevice(M))
5000     for (Function &F : M)
5001       if (!F.isDeclaration() && !Kernels.contains(&F) &&
5002           !F.hasFnAttribute(Attribute::NoInline))
5003         F.addFnAttr(Attribute::AlwaysInline);
5004 
5005   if (PrintModuleAfterOptimizations)
5006     LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt Module Pass:\n" << M);
5007 
5008   if (Changed)
5009     return PreservedAnalyses::none();
5010 
5011   return PreservedAnalyses::all();
5012 }
5013 
5014 PreservedAnalyses OpenMPOptCGSCCPass::run(LazyCallGraph::SCC &C,
5015                                           CGSCCAnalysisManager &AM,
5016                                           LazyCallGraph &CG,
5017                                           CGSCCUpdateResult &UR) {
5018   if (!containsOpenMP(*C.begin()->getFunction().getParent()))
5019     return PreservedAnalyses::all();
5020   if (DisableOpenMPOptimizations)
5021     return PreservedAnalyses::all();
5022 
5023   SmallVector<Function *, 16> SCC;
5024   // If there are kernels in the module, we have to run on all SCC's.
5025   for (LazyCallGraph::Node &N : C) {
5026     Function *Fn = &N.getFunction();
5027     SCC.push_back(Fn);
5028   }
5029 
5030   if (SCC.empty())
5031     return PreservedAnalyses::all();
5032 
5033   Module &M = *C.begin()->getFunction().getParent();
5034 
5035   if (PrintModuleBeforeOptimizations)
5036     LLVM_DEBUG(dbgs() << TAG << "Module before OpenMPOpt CGSCC Pass:\n" << M);
5037 
5038   KernelSet Kernels = getDeviceKernels(M);
5039 
5040   FunctionAnalysisManager &FAM =
5041       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
5042 
5043   AnalysisGetter AG(FAM);
5044 
5045   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
5046     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
5047   };
5048 
5049   BumpPtrAllocator Allocator;
5050   CallGraphUpdater CGUpdater;
5051   CGUpdater.initialize(CG, C, AM, UR);
5052 
5053   SetVector<Function *> Functions(SCC.begin(), SCC.end());
5054   OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
5055                                 /*CGSCC*/ Functions, Kernels);
5056 
5057   unsigned MaxFixpointIterations =
5058       (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
5059   Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
5060                MaxFixpointIterations, OREGetter, DEBUG_TYPE);
5061 
5062   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
5063   bool Changed = OMPOpt.run(false);
5064 
5065   if (PrintModuleAfterOptimizations)
5066     LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);
5067 
5068   if (Changed)
5069     return PreservedAnalyses::none();
5070 
5071   return PreservedAnalyses::all();
5072 }
5073 
5074 namespace {
5075 
5076 struct OpenMPOptCGSCCLegacyPass : public CallGraphSCCPass {
5077   CallGraphUpdater CGUpdater;
5078   static char ID;
5079 
5080   OpenMPOptCGSCCLegacyPass() : CallGraphSCCPass(ID) {
5081     initializeOpenMPOptCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
5082   }
5083 
5084   void getAnalysisUsage(AnalysisUsage &AU) const override {
5085     CallGraphSCCPass::getAnalysisUsage(AU);
5086   }
5087 
5088   bool runOnSCC(CallGraphSCC &CGSCC) override {
5089     if (!containsOpenMP(CGSCC.getCallGraph().getModule()))
5090       return false;
5091     if (DisableOpenMPOptimizations || skipSCC(CGSCC))
5092       return false;
5093 
5094     SmallVector<Function *, 16> SCC;
5095     // If there are kernels in the module, we have to run on all SCC's.
5096     for (CallGraphNode *CGN : CGSCC) {
5097       Function *Fn = CGN->getFunction();
5098       if (!Fn || Fn->isDeclaration())
5099         continue;
5100       SCC.push_back(Fn);
5101     }
5102 
5103     if (SCC.empty())
5104       return false;
5105 
5106     Module &M = CGSCC.getCallGraph().getModule();
5107     KernelSet Kernels = getDeviceKernels(M);
5108 
5109     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
5110     CGUpdater.initialize(CG, CGSCC);
5111 
5112     // Maintain a map of functions to avoid rebuilding the ORE
5113     DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
5114     auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
5115       std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
5116       if (!ORE)
5117         ORE = std::make_unique<OptimizationRemarkEmitter>(F);
5118       return *ORE;
5119     };
5120 
5121     AnalysisGetter AG;
5122     SetVector<Function *> Functions(SCC.begin(), SCC.end());
5123     BumpPtrAllocator Allocator;
5124     OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG,
5125                                   Allocator,
5126                                   /*CGSCC*/ Functions, Kernels);
5127 
5128     unsigned MaxFixpointIterations =
5129         (isOpenMPDevice(M)) ? SetFixpointIterations : 32;
5130     Attributor A(Functions, InfoCache, CGUpdater, nullptr, false, true,
5131                  MaxFixpointIterations, OREGetter, DEBUG_TYPE);
5132 
5133     OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
5134     bool Result = OMPOpt.run(false);
5135 
5136     if (PrintModuleAfterOptimizations)
5137       LLVM_DEBUG(dbgs() << TAG << "Module after OpenMPOpt CGSCC Pass:\n" << M);
5138 
5139     return Result;
5140   }
5141 
5142   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
5143 };
5144 
5145 } // end anonymous namespace
5146 
5147 KernelSet llvm::omp::getDeviceKernels(Module &M) {
5148   // TODO: Create a more cross-platform way of determining device kernels.
5149   NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
5150   KernelSet Kernels;
5151 
5152   if (!MD)
5153     return Kernels;
5154 
5155   for (auto *Op : MD->operands()) {
5156     if (Op->getNumOperands() < 2)
5157       continue;
5158     MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
5159     if (!KindID || KindID->getString() != "kernel")
5160       continue;
5161 
5162     Function *KernelFn =
5163         mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
5164     if (!KernelFn)
5165       continue;
5166 
5167     ++NumOpenMPTargetRegionKernels;
5168 
5169     Kernels.insert(KernelFn);
5170   }
5171 
5172   return Kernels;
5173 }
5174 
5175 bool llvm::omp::containsOpenMP(Module &M) {
5176   Metadata *MD = M.getModuleFlag("openmp");
5177   if (!MD)
5178     return false;
5179 
5180   return true;
5181 }
5182 
5183 bool llvm::omp::isOpenMPDevice(Module &M) {
5184   Metadata *MD = M.getModuleFlag("openmp-device");
5185   if (!MD)
5186     return false;
5187 
5188   return true;
5189 }
5190 
5191 char OpenMPOptCGSCCLegacyPass::ID = 0;
5192 
5193 INITIALIZE_PASS_BEGIN(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
5194                       "OpenMP specific optimizations", false, false)
5195 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
5196 INITIALIZE_PASS_END(OpenMPOptCGSCCLegacyPass, "openmp-opt-cgscc",
5197                     "OpenMP specific optimizations", false, false)
5198 
5199 Pass *llvm::createOpenMPOptCGSCCLegacyPass() {
5200   return new OpenMPOptCGSCCLegacyPass();
5201 }
5202