1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // OpenMP specific optimizations:
10 //
11 // - Deduplication of runtime calls, e.g., omp_get_thread_num.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/IPO/OpenMPOpt.h"
16 
17 #include "llvm/ADT/EnumeratedArray.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/CallGraph.h"
20 #include "llvm/Analysis/CallGraphSCCPass.h"
21 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
22 #include "llvm/Frontend/OpenMP/OMPConstants.h"
23 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
24 #include "llvm/InitializePasses.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Transforms/IPO.h"
27 #include "llvm/Transforms/IPO/Attributor.h"
28 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 
31 using namespace llvm;
32 using namespace omp;
33 
34 #define DEBUG_TYPE "openmp-opt"
35 
36 static cl::opt<bool> DisableOpenMPOptimizations(
37     "openmp-opt-disable", cl::ZeroOrMore,
38     cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
39     cl::init(false));
40 
41 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
42                                     cl::Hidden);
43 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
44                                         cl::init(false), cl::Hidden);
45 
46 static cl::opt<bool> HideMemoryTransferLatency(
47     "openmp-hide-memory-transfer-latency",
48     cl::desc("[WIP] Tries to hide the latency of host to device memory"
49              " transfers"),
50     cl::Hidden, cl::init(false));
51 
52 
53 STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
54           "Number of OpenMP runtime calls deduplicated");
55 STATISTIC(NumOpenMPParallelRegionsDeleted,
56           "Number of OpenMP parallel regions deleted");
57 STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
58           "Number of OpenMP runtime functions identified");
59 STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
60           "Number of OpenMP runtime function uses identified");
61 STATISTIC(NumOpenMPTargetRegionKernels,
62           "Number of OpenMP target region entry points (=kernels) identified");
63 STATISTIC(
64     NumOpenMPParallelRegionsReplacedInGPUStateMachine,
65     "Number of OpenMP parallel regions replaced with ID in GPU state machines");
66 
67 #if !defined(NDEBUG)
68 static constexpr auto TAG = "[" DEBUG_TYPE "]";
69 #endif
70 
71 namespace {
72 
73 struct AAICVTracker;
74 
75 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for
76 /// Attributor runs.
77 struct OMPInformationCache : public InformationCache {
78   OMPInformationCache(Module &M, AnalysisGetter &AG,
79                       BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
80                       SmallPtrSetImpl<Kernel> &Kernels)
81       : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
82         Kernels(Kernels) {
83 
84     OMPBuilder.initialize();
85     initializeRuntimeFunctions();
86     initializeInternalControlVars();
87   }
88 
89   /// Generic information that describes an internal control variable.
90   struct InternalControlVarInfo {
91     /// The kind, as described by InternalControlVar enum.
92     InternalControlVar Kind;
93 
94     /// The name of the ICV.
95     StringRef Name;
96 
97     /// Environment variable associated with this ICV.
98     StringRef EnvVarName;
99 
100     /// Initial value kind.
101     ICVInitValue InitKind;
102 
103     /// Initial value.
104     ConstantInt *InitValue;
105 
106     /// Setter RTL function associated with this ICV.
107     RuntimeFunction Setter;
108 
109     /// Getter RTL function associated with this ICV.
110     RuntimeFunction Getter;
111 
112     /// RTL Function corresponding to the override clause of this ICV
113     RuntimeFunction Clause;
114   };
115 
116   /// Generic information that describes a runtime function
117   struct RuntimeFunctionInfo {
118 
119     /// The kind, as described by the RuntimeFunction enum.
120     RuntimeFunction Kind;
121 
122     /// The name of the function.
123     StringRef Name;
124 
125     /// Flag to indicate a variadic function.
126     bool IsVarArg;
127 
128     /// The return type of the function.
129     Type *ReturnType;
130 
131     /// The argument types of the function.
132     SmallVector<Type *, 8> ArgumentTypes;
133 
134     /// The declaration if available.
135     Function *Declaration = nullptr;
136 
137     /// Uses of this runtime function per function containing the use.
138     using UseVector = SmallVector<Use *, 16>;
139 
140     /// Clear UsesMap for runtime function.
141     void clearUsesMap() { UsesMap.clear(); }
142 
143     /// Boolean conversion that is true if the runtime function was found.
144     operator bool() const { return Declaration; }
145 
146     /// Return the vector of uses in function \p F.
147     UseVector &getOrCreateUseVector(Function *F) {
148       std::shared_ptr<UseVector> &UV = UsesMap[F];
149       if (!UV)
150         UV = std::make_shared<UseVector>();
151       return *UV;
152     }
153 
154     /// Return the vector of uses in function \p F or `nullptr` if there are
155     /// none.
156     const UseVector *getUseVector(Function &F) const {
157       auto I = UsesMap.find(&F);
158       if (I != UsesMap.end())
159         return I->second.get();
160       return nullptr;
161     }
162 
163     /// Return how many functions contain uses of this runtime function.
164     size_t getNumFunctionsWithUses() const { return UsesMap.size(); }
165 
166     /// Return the number of arguments (or the minimal number for variadic
167     /// functions).
168     size_t getNumArgs() const { return ArgumentTypes.size(); }
169 
170     /// Run the callback \p CB on each use and forget the use if the result is
171     /// true. The callback will be fed the function in which the use was
172     /// encountered as second argument.
173     void foreachUse(SmallVectorImpl<Function *> &SCC,
174                     function_ref<bool(Use &, Function &)> CB) {
175       for (Function *F : SCC)
176         foreachUse(CB, F);
177     }
178 
179     /// Run the callback \p CB on each use within the function \p F and forget
180     /// the use if the result is true.
181     void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
182       SmallVector<unsigned, 8> ToBeDeleted;
183       ToBeDeleted.clear();
184 
185       unsigned Idx = 0;
186       UseVector &UV = getOrCreateUseVector(F);
187 
188       for (Use *U : UV) {
189         if (CB(*U, *F))
190           ToBeDeleted.push_back(Idx);
191         ++Idx;
192       }
193 
194       // Remove the to-be-deleted indices in reverse order as prior
195       // modifications will not modify the smaller indices.
196       while (!ToBeDeleted.empty()) {
197         unsigned Idx = ToBeDeleted.pop_back_val();
198         UV[Idx] = UV.back();
199         UV.pop_back();
200       }
201     }
202 
203   private:
204     /// Map from functions to all uses of this runtime function contained in
205     /// them.
206     DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
207   };
208 
209   /// An OpenMP-IR-Builder instance
210   OpenMPIRBuilder OMPBuilder;
211 
212   /// Map from runtime function kind to the runtime function description.
213   EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
214                   RuntimeFunction::OMPRTL___last>
215       RFIs;
216 
217   /// Map from ICV kind to the ICV description.
218   EnumeratedArray<InternalControlVarInfo, InternalControlVar,
219                   InternalControlVar::ICV___last>
220       ICVs;
221 
222   /// Helper to initialize all internal control variable information for those
223   /// defined in OMPKinds.def.
224   void initializeInternalControlVars() {
225 #define ICV_RT_SET(_Name, RTL)                                                 \
226   {                                                                            \
227     auto &ICV = ICVs[_Name];                                                   \
228     ICV.Setter = RTL;                                                          \
229   }
230 #define ICV_RT_GET(Name, RTL)                                                  \
231   {                                                                            \
232     auto &ICV = ICVs[Name];                                                    \
233     ICV.Getter = RTL;                                                          \
234   }
235 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
236   {                                                                            \
237     auto &ICV = ICVs[Enum];                                                    \
238     ICV.Name = _Name;                                                          \
239     ICV.Kind = Enum;                                                           \
240     ICV.InitKind = Init;                                                       \
241     ICV.EnvVarName = _EnvVarName;                                              \
242     switch (ICV.InitKind) {                                                    \
243     case ICV_IMPLEMENTATION_DEFINED:                                           \
244       ICV.InitValue = nullptr;                                                 \
245       break;                                                                   \
246     case ICV_ZERO:                                                             \
247       ICV.InitValue = ConstantInt::get(                                        \
248           Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
249       break;                                                                   \
250     case ICV_FALSE:                                                            \
251       ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
252       break;                                                                   \
253     case ICV_LAST:                                                             \
254       break;                                                                   \
255     }                                                                          \
256   }
257 #include "llvm/Frontend/OpenMP/OMPKinds.def"
258   }
259 
260   /// Returns true if the function declaration \p F matches the runtime
261   /// function types, that is, return type \p RTFRetType, and argument types
262   /// \p RTFArgTypes.
263   static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
264                                   SmallVector<Type *, 8> &RTFArgTypes) {
265     // TODO: We should output information to the user (under debug output
266     //       and via remarks).
267 
268     if (!F)
269       return false;
270     if (F->getReturnType() != RTFRetType)
271       return false;
272     if (F->arg_size() != RTFArgTypes.size())
273       return false;
274 
275     auto RTFTyIt = RTFArgTypes.begin();
276     for (Argument &Arg : F->args()) {
277       if (Arg.getType() != *RTFTyIt)
278         return false;
279 
280       ++RTFTyIt;
281     }
282 
283     return true;
284   }
285 
286   // Helper to collect all uses of the declaration in the UsesMap.
287   unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
288     unsigned NumUses = 0;
289     if (!RFI.Declaration)
290       return NumUses;
291     OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);
292 
293     if (CollectStats) {
294       NumOpenMPRuntimeFunctionsIdentified += 1;
295       NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
296     }
297 
298     // TODO: We directly convert uses into proper calls and unknown uses.
299     for (Use &U : RFI.Declaration->uses()) {
300       if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
301         if (ModuleSlice.count(UserI->getFunction())) {
302           RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
303           ++NumUses;
304         }
305       } else {
306         RFI.getOrCreateUseVector(nullptr).push_back(&U);
307         ++NumUses;
308       }
309     }
310     return NumUses;
311   }
312 
313   // Helper function to recollect uses of all runtime functions.
314   void recollectUses() {
315     for (int Idx = 0; Idx < RFIs.size(); ++Idx) {
316       auto &RFI = RFIs[static_cast<RuntimeFunction>(Idx)];
317       RFI.clearUsesMap();
318       collectUses(RFI, /*CollectStats*/ false);
319     }
320   }
321 
322   /// Helper to initialize all runtime function information for those defined
323   /// in OpenMPKinds.def.
324   void initializeRuntimeFunctions() {
325     Module &M = *((*ModuleSlice.begin())->getParent());
326 
327     // Helper macros for handling __VA_ARGS__ in OMP_RTL
328 #define OMP_TYPE(VarName, ...)                                                 \
329   Type *VarName = OMPBuilder.VarName;                                          \
330   (void)VarName;
331 
332 #define OMP_ARRAY_TYPE(VarName, ...)                                           \
333   ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
334   (void)VarName##Ty;                                                           \
335   PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
336   (void)VarName##PtrTy;
337 
338 #define OMP_FUNCTION_TYPE(VarName, ...)                                        \
339   FunctionType *VarName = OMPBuilder.VarName;                                  \
340   (void)VarName;                                                               \
341   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
342   (void)VarName##Ptr;
343 
344 #define OMP_STRUCT_TYPE(VarName, ...)                                          \
345   StructType *VarName = OMPBuilder.VarName;                                    \
346   (void)VarName;                                                               \
347   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
348   (void)VarName##Ptr;
349 
350 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
351   {                                                                            \
352     SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
353     Function *F = M.getFunction(_Name);                                        \
354     if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
355       auto &RFI = RFIs[_Enum];                                                 \
356       RFI.Kind = _Enum;                                                        \
357       RFI.Name = _Name;                                                        \
358       RFI.IsVarArg = _IsVarArg;                                                \
359       RFI.ReturnType = OMPBuilder._ReturnType;                                 \
360       RFI.ArgumentTypes = std::move(ArgsTypes);                                \
361       RFI.Declaration = F;                                                     \
362       unsigned NumUses = collectUses(RFI);                                     \
363       (void)NumUses;                                                           \
364       LLVM_DEBUG({                                                             \
365         dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
366                << " found\n";                                                  \
367         if (RFI.Declaration)                                                   \
368           dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
369                  << RFI.getNumFunctionsWithUses()                              \
370                  << " different functions.\n";                                 \
371       });                                                                      \
372     }                                                                          \
373   }
374 #include "llvm/Frontend/OpenMP/OMPKinds.def"
375 
376     // TODO: We should attach the attributes defined in OMPKinds.def.
377   }
378 
379   /// Collection of known kernels (\see Kernel) in the module.
380   SmallPtrSetImpl<Kernel> &Kernels;
381 };
382 
383 /// Used to map the values physically (in the IR) stored in an offload
384 /// array, to a vector in memory.
385 struct OffloadArray {
386   /// Physical array (in the IR).
387   AllocaInst *Array = nullptr;
388   /// Mapped values.
389   SmallVector<Value *, 8> StoredValues;
390   /// Last stores made in the offload array.
391   SmallVector<StoreInst *, 8> LastAccesses;
392 
393   OffloadArray() = default;
394 
395   /// Initializes the OffloadArray with the values stored in \p Array before
396   /// instruction \p Before is reached. Returns false if the initialization
397   /// fails.
398   /// This MUST be used immediately after the construction of the object.
399   bool initialize(AllocaInst &Array, Instruction &Before) {
400     if (!Array.getAllocatedType()->isArrayTy())
401       return false;
402 
403     if (!getValues(Array, Before))
404       return false;
405 
406     this->Array = &Array;
407     return true;
408   }
409 
410   static const unsigned BasePtrsArgNum = 2;
411   static const unsigned PtrsArgNum = 3;
412   static const unsigned SizesArgNum = 4;
413 
414 private:
415   /// Traverses the BasicBlock where \p Array is, collecting the stores made to
416   /// \p Array, leaving StoredValues with the values stored before the
417   /// instruction \p Before is reached.
418   bool getValues(AllocaInst &Array, Instruction &Before) {
419     // Initialize container.
420     const uint64_t NumValues =
421         Array.getAllocatedType()->getArrayNumElements();
422     StoredValues.assign(NumValues, nullptr);
423     LastAccesses.assign(NumValues, nullptr);
424 
425     // TODO: This assumes the instruction \p Before is in the same
426     //  BasicBlock as Array. Make it general, for any control flow graph.
427     BasicBlock *BB = Array.getParent();
428     if (BB != Before.getParent())
429       return false;
430 
431     const DataLayout &DL = Array.getModule()->getDataLayout();
432     const unsigned int PointerSize = DL.getPointerSize();
433 
434     for (Instruction &I : *BB) {
435       if (&I == &Before)
436         break;
437 
438       if (!isa<StoreInst>(&I))
439         continue;
440 
441       auto *S = cast<StoreInst>(&I);
442       int64_t Offset = -1;
443       auto *Dst = GetPointerBaseWithConstantOffset(S->getPointerOperand(),
444                                                    Offset, DL);
445       if (Dst == &Array) {
446         int64_t Idx = Offset / PointerSize;
447         StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
448         LastAccesses[Idx] = S;
449       }
450     }
451 
452     return isFilled();
453   }
454 
455   /// Returns true if all values in StoredValues and
456   /// LastAccesses are not nullptrs.
457   bool isFilled() {
458     const unsigned NumValues = StoredValues.size();
459     for (unsigned I = 0; I < NumValues; ++I) {
460       if (!StoredValues[I] || !LastAccesses[I])
461         return false;
462     }
463 
464     return true;
465   }
466 };
467 
468 struct OpenMPOpt {
469 
470   using OptimizationRemarkGetter =
471       function_ref<OptimizationRemarkEmitter &(Function *)>;
472 
473   OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
474             OptimizationRemarkGetter OREGetter,
475             OMPInformationCache &OMPInfoCache, Attributor &A)
476       : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
477         OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}
478 
479   /// Check if any remarks are enabled for openmp-opt
480   bool remarksEnabled() {
481     auto &Ctx = M.getContext();
482     return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
483   }
484 
485   /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
486   bool run() {
487     if (SCC.empty())
488       return false;
489 
490     bool Changed = false;
491 
492     LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
493                       << " functions in a slice with "
494                       << OMPInfoCache.ModuleSlice.size() << " functions\n");
495 
496     if (PrintICVValues)
497       printICVs();
498     if (PrintOpenMPKernels)
499       printKernels();
500 
501     Changed |= rewriteDeviceCodeStateMachine();
502 
503     Changed |= runAttributor();
504 
505     // Recollect uses, in case Attributor deleted any.
506     OMPInfoCache.recollectUses();
507 
508     Changed |= deduplicateRuntimeCalls();
509     Changed |= deleteParallelRegions();
510     if (HideMemoryTransferLatency)
511       Changed |= hideMemTransfersLatency();
512     if (remarksEnabled())
513       analysisGlobalization();
514 
515     return Changed;
516   }
517 
518   /// Print initial ICV values for testing.
519   /// FIXME: This should be done from the Attributor once it is added.
520   void printICVs() const {
521     InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
522                                  ICV_proc_bind};
523 
524     for (Function *F : OMPInfoCache.ModuleSlice) {
525       for (auto ICV : ICVs) {
526         auto ICVInfo = OMPInfoCache.ICVs[ICV];
527         auto Remark = [&](OptimizationRemark OR) {
528           return OR << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
529                     << " Value: "
530                     << (ICVInfo.InitValue
531                             ? ICVInfo.InitValue->getValue().toString(10, true)
532                             : "IMPLEMENTATION_DEFINED");
533         };
534 
535         emitRemarkOnFunction(F, "OpenMPICVTracker", Remark);
536       }
537     }
538   }
539 
540   /// Print OpenMP GPU kernels for testing.
541   void printKernels() const {
542     for (Function *F : SCC) {
543       if (!OMPInfoCache.Kernels.count(F))
544         continue;
545 
546       auto Remark = [&](OptimizationRemark OR) {
547         return OR << "OpenMP GPU kernel "
548                   << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
549       };
550 
551       emitRemarkOnFunction(F, "OpenMPGPU", Remark);
552     }
553   }
554 
555   /// Return the call if \p U is a callee use in a regular call. If \p RFI is
556   /// given it has to be the callee or a nullptr is returned.
557   static CallInst *getCallIfRegularCall(
558       Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
559     CallInst *CI = dyn_cast<CallInst>(U.getUser());
560     if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
561         (!RFI || CI->getCalledFunction() == RFI->Declaration))
562       return CI;
563     return nullptr;
564   }
565 
566   /// Return the call if \p V is a regular call. If \p RFI is given it has to be
567   /// the callee or a nullptr is returned.
568   static CallInst *getCallIfRegularCall(
569       Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
570     CallInst *CI = dyn_cast<CallInst>(&V);
571     if (CI && !CI->hasOperandBundles() &&
572         (!RFI || CI->getCalledFunction() == RFI->Declaration))
573       return CI;
574     return nullptr;
575   }
576 
577 private:
578   /// Try to delete parallel regions if possible.
579   bool deleteParallelRegions() {
580     const unsigned CallbackCalleeOperand = 2;
581 
582     OMPInformationCache::RuntimeFunctionInfo &RFI =
583         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
584 
585     if (!RFI.Declaration)
586       return false;
587 
588     bool Changed = false;
589     auto DeleteCallCB = [&](Use &U, Function &) {
590       CallInst *CI = getCallIfRegularCall(U);
591       if (!CI)
592         return false;
593       auto *Fn = dyn_cast<Function>(
594           CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
595       if (!Fn)
596         return false;
597       if (!Fn->onlyReadsMemory())
598         return false;
599       if (!Fn->hasFnAttribute(Attribute::WillReturn))
600         return false;
601 
602       LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
603                         << CI->getCaller()->getName() << "\n");
604 
605       auto Remark = [&](OptimizationRemark OR) {
606         return OR << "Parallel region in "
607                   << ore::NV("OpenMPParallelDelete", CI->getCaller()->getName())
608                   << " deleted";
609       };
610       emitRemark<OptimizationRemark>(CI, "OpenMPParallelRegionDeletion",
611                                      Remark);
612 
613       CGUpdater.removeCallSite(*CI);
614       CI->eraseFromParent();
615       Changed = true;
616       ++NumOpenMPParallelRegionsDeleted;
617       return true;
618     };
619 
620     RFI.foreachUse(SCC, DeleteCallCB);
621 
622     return Changed;
623   }
624 
625   /// Try to eliminate runtime calls by reusing existing ones.
626   bool deduplicateRuntimeCalls() {
627     bool Changed = false;
628 
629     RuntimeFunction DeduplicableRuntimeCallIDs[] = {
630         OMPRTL_omp_get_num_threads,
631         OMPRTL_omp_in_parallel,
632         OMPRTL_omp_get_cancellation,
633         OMPRTL_omp_get_thread_limit,
634         OMPRTL_omp_get_supported_active_levels,
635         OMPRTL_omp_get_level,
636         OMPRTL_omp_get_ancestor_thread_num,
637         OMPRTL_omp_get_team_size,
638         OMPRTL_omp_get_active_level,
639         OMPRTL_omp_in_final,
640         OMPRTL_omp_get_proc_bind,
641         OMPRTL_omp_get_num_places,
642         OMPRTL_omp_get_num_procs,
643         OMPRTL_omp_get_place_num,
644         OMPRTL_omp_get_partition_num_places,
645         OMPRTL_omp_get_partition_place_nums};
646 
647     // Global-tid is handled separately.
648     SmallSetVector<Value *, 16> GTIdArgs;
649     collectGlobalThreadIdArguments(GTIdArgs);
650     LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
651                       << " global thread ID arguments\n");
652 
653     for (Function *F : SCC) {
654       for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
655         Changed |= deduplicateRuntimeCalls(
656             *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);
657 
658       // __kmpc_global_thread_num is special as we can replace it with an
659       // argument in enough cases to make it worth trying.
660       Value *GTIdArg = nullptr;
661       for (Argument &Arg : F->args())
662         if (GTIdArgs.count(&Arg)) {
663           GTIdArg = &Arg;
664           break;
665         }
666       Changed |= deduplicateRuntimeCalls(
667           *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
668     }
669 
670     return Changed;
671   }
672 
673   /// Tries to hide the latency of runtime calls that involve host to
674   /// device memory transfers by splitting them into their "issue" and "wait"
675   /// versions. The "issue" is moved upwards as much as possible. The "wait" is
676   /// moved downards as much as possible. The "issue" issues the memory transfer
677   /// asynchronously, returning a handle. The "wait" waits in the returned
678   /// handle for the memory transfer to finish.
679   bool hideMemTransfersLatency() {
680     auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
681     bool Changed = false;
682     auto SplitMemTransfers = [&](Use &U, Function &Decl) {
683       auto *RTCall = getCallIfRegularCall(U, &RFI);
684       if (!RTCall)
685         return false;
686 
687       OffloadArray OffloadArrays[3];
688       if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
689         return false;
690 
691       LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));
692 
693       // TODO: Check if can be moved upwards.
694       bool WasSplit = false;
695       Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
696       if (WaitMovementPoint)
697         WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);
698 
699       Changed |= WasSplit;
700       return WasSplit;
701     };
702     RFI.foreachUse(SCC, SplitMemTransfers);
703 
704     return Changed;
705   }
706 
707   void analysisGlobalization() {
708     RuntimeFunction GlobalizationRuntimeIDs[] = {
709         OMPRTL___kmpc_data_sharing_coalesced_push_stack,
710         OMPRTL___kmpc_data_sharing_push_stack};
711 
712     for (const auto GlobalizationCallID : GlobalizationRuntimeIDs) {
713       auto &RFI = OMPInfoCache.RFIs[GlobalizationCallID];
714 
715       auto CheckGlobalization = [&](Use &U, Function &Decl) {
716         if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
717           auto Remark = [&](OptimizationRemarkAnalysis ORA) {
718             return ORA
719                    << "Found thread data sharing on the GPU. "
720                    << "Expect degraded performance due to data globalization.";
721           };
722           emitRemark<OptimizationRemarkAnalysis>(CI, "OpenMPGlobalization",
723                                                  Remark);
724         }
725 
726         return false;
727       };
728 
729       RFI.foreachUse(SCC, CheckGlobalization);
730     }
731     return;
732   }
733 
734   /// Maps the values stored in the offload arrays passed as arguments to
735   /// \p RuntimeCall into the offload arrays in \p OAs.
736   bool getValuesInOffloadArrays(CallInst &RuntimeCall,
737                                 MutableArrayRef<OffloadArray> OAs) {
738     assert(OAs.size() == 3 && "Need space for three offload arrays!");
739 
740     // A runtime call that involves memory offloading looks something like:
741     // call void @__tgt_target_data_begin_mapper(arg0, arg1,
742     //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
743     // ...)
744     // So, the idea is to access the allocas that allocate space for these
745     // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
746     // Therefore:
747     // i8** %offload_baseptrs.
748     Value *BasePtrsArg =
749         RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
750     // i8** %offload_ptrs.
751     Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
752     // i8** %offload_sizes.
753     Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);
754 
755     // Get values stored in **offload_baseptrs.
756     auto *V = getUnderlyingObject(BasePtrsArg);
757     if (!isa<AllocaInst>(V))
758       return false;
759     auto *BasePtrsArray = cast<AllocaInst>(V);
760     if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
761       return false;
762 
763     // Get values stored in **offload_baseptrs.
764     V = getUnderlyingObject(PtrsArg);
765     if (!isa<AllocaInst>(V))
766       return false;
767     auto *PtrsArray = cast<AllocaInst>(V);
768     if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
769       return false;
770 
771     // Get values stored in **offload_sizes.
772     V = getUnderlyingObject(SizesArg);
773     // If it's a [constant] global array don't analyze it.
774     if (isa<GlobalValue>(V))
775       return isa<Constant>(V);
776     if (!isa<AllocaInst>(V))
777       return false;
778 
779     auto *SizesArray = cast<AllocaInst>(V);
780     if (!OAs[2].initialize(*SizesArray, RuntimeCall))
781       return false;
782 
783     return true;
784   }
785 
786   /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
787   /// For now this is a way to test that the function getValuesInOffloadArrays
788   /// is working properly.
789   /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
790   void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
791     assert(OAs.size() == 3 && "There are three offload arrays to debug!");
792 
793     LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
794     std::string ValuesStr;
795     raw_string_ostream Printer(ValuesStr);
796     std::string Separator = " --- ";
797 
798     for (auto *BP : OAs[0].StoredValues) {
799       BP->print(Printer);
800       Printer << Separator;
801     }
802     LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
803     ValuesStr.clear();
804 
805     for (auto *P : OAs[1].StoredValues) {
806       P->print(Printer);
807       Printer << Separator;
808     }
809     LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
810     ValuesStr.clear();
811 
812     for (auto *S : OAs[2].StoredValues) {
813       S->print(Printer);
814       Printer << Separator;
815     }
816     LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
817   }
818 
819   /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be
820   /// moved. Returns nullptr if the movement is not possible, or not worth it.
821   Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
822     // FIXME: This traverses only the BasicBlock where RuntimeCall is.
823     //  Make it traverse the CFG.
824 
825     Instruction *CurrentI = &RuntimeCall;
826     bool IsWorthIt = false;
827     while ((CurrentI = CurrentI->getNextNode())) {
828 
829       // TODO: Once we detect the regions to be offloaded we should use the
830       //  alias analysis manager to check if CurrentI may modify one of
831       //  the offloaded regions.
832       if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
833         if (IsWorthIt)
834           return CurrentI;
835 
836         return nullptr;
837       }
838 
839       // FIXME: For now if we move it over anything without side effect
840       //  is worth it.
841       IsWorthIt = true;
842     }
843 
844     // Return end of BasicBlock.
845     return RuntimeCall.getParent()->getTerminator();
846   }
847 
848   /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
849   bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
850                                Instruction &WaitMovementPoint) {
851     // Create stack allocated handle (__tgt_async_info) at the beginning of the
852     // function. Used for storing information of the async transfer, allowing to
853     // wait on it later.
854     auto &IRBuilder = OMPInfoCache.OMPBuilder;
855     auto *F = RuntimeCall.getCaller();
856     Instruction *FirstInst = &(F->getEntryBlock().front());
857     AllocaInst *Handle = new AllocaInst(
858         IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);
859 
860     // Add "issue" runtime call declaration:
861     // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
862     //   i8**, i8**, i64*, i64*)
863     FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
864         M, OMPRTL___tgt_target_data_begin_mapper_issue);
865 
866     // Change RuntimeCall call site for its asynchronous version.
867     SmallVector<Value *, 8> Args;
868     for (auto &Arg : RuntimeCall.args())
869       Args.push_back(Arg.get());
870     Args.push_back(Handle);
871 
872     CallInst *IssueCallsite =
873         CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
874     RuntimeCall.eraseFromParent();
875 
876     // Add "wait" runtime call declaration:
877     // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
878     FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
879         M, OMPRTL___tgt_target_data_begin_mapper_wait);
880 
881     // Add call site to WaitDecl.
882     const unsigned DeviceIDArgNum = 0;
883     Value *WaitParams[2] = {
884         IssueCallsite->getArgOperand(DeviceIDArgNum), // device_id.
885         Handle                                        // handle to wait on.
886     };
887     CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
888 
889     return true;
890   }
891 
892   static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
893                                     bool GlobalOnly, bool &SingleChoice) {
894     if (CurrentIdent == NextIdent)
895       return CurrentIdent;
896 
897     // TODO: Figure out how to actually combine multiple debug locations. For
898     //       now we just keep an existing one if there is a single choice.
899     if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
900       SingleChoice = !CurrentIdent;
901       return NextIdent;
902     }
903     return nullptr;
904   }
905 
906   /// Return an `struct ident_t*` value that represents the ones used in the
907   /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
908   /// return a local `struct ident_t*`. For now, if we cannot find a suitable
909   /// return value we create one from scratch. We also do not yet combine
910   /// information, e.g., the source locations, see combinedIdentStruct.
911   Value *
912   getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
913                                  Function &F, bool GlobalOnly) {
914     bool SingleChoice = true;
915     Value *Ident = nullptr;
916     auto CombineIdentStruct = [&](Use &U, Function &Caller) {
917       CallInst *CI = getCallIfRegularCall(U, &RFI);
918       if (!CI || &F != &Caller)
919         return false;
920       Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
921                                   /* GlobalOnly */ true, SingleChoice);
922       return false;
923     };
924     RFI.foreachUse(SCC, CombineIdentStruct);
925 
926     if (!Ident || !SingleChoice) {
927       // The IRBuilder uses the insertion block to get to the module, this is
928       // unfortunate but we work around it for now.
929       if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
930         OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
931             &F.getEntryBlock(), F.getEntryBlock().begin()));
932       // Create a fallback location if non was found.
933       // TODO: Use the debug locations of the calls instead.
934       Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
935       Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
936     }
937     return Ident;
938   }
939 
940   /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
941   /// \p ReplVal if given.
942   bool deduplicateRuntimeCalls(Function &F,
943                                OMPInformationCache::RuntimeFunctionInfo &RFI,
944                                Value *ReplVal = nullptr) {
945     auto *UV = RFI.getUseVector(F);
946     if (!UV || UV->size() + (ReplVal != nullptr) < 2)
947       return false;
948 
949     LLVM_DEBUG(
950         dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
951                << (ReplVal ? " with an existing value\n" : "\n") << "\n");
952 
953     assert((!ReplVal || (isa<Argument>(ReplVal) &&
954                          cast<Argument>(ReplVal)->getParent() == &F)) &&
955            "Unexpected replacement value!");
956 
957     // TODO: Use dominance to find a good position instead.
958     auto CanBeMoved = [this](CallBase &CB) {
959       unsigned NumArgs = CB.getNumArgOperands();
960       if (NumArgs == 0)
961         return true;
962       if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
963         return false;
964       for (unsigned u = 1; u < NumArgs; ++u)
965         if (isa<Instruction>(CB.getArgOperand(u)))
966           return false;
967       return true;
968     };
969 
970     if (!ReplVal) {
971       for (Use *U : *UV)
972         if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
973           if (!CanBeMoved(*CI))
974             continue;
975 
976           auto Remark = [&](OptimizationRemark OR) {
977             auto newLoc = &*F.getEntryBlock().getFirstInsertionPt();
978             return OR << "OpenMP runtime call "
979                       << ore::NV("OpenMPOptRuntime", RFI.Name) << " moved to "
980                       << ore::NV("OpenMPRuntimeMoves", newLoc->getDebugLoc());
981           };
982           emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeCodeMotion", Remark);
983 
984           CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
985           ReplVal = CI;
986           break;
987         }
988       if (!ReplVal)
989         return false;
990     }
991 
992     // If we use a call as a replacement value we need to make sure the ident is
993     // valid at the new location. For now we just pick a global one, either
994     // existing and used by one of the calls, or created from scratch.
995     if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
996       if (CI->getNumArgOperands() > 0 &&
997           CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
998         Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
999                                                       /* GlobalOnly */ true);
1000         CI->setArgOperand(0, Ident);
1001       }
1002     }
1003 
1004     bool Changed = false;
1005     auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1006       CallInst *CI = getCallIfRegularCall(U, &RFI);
1007       if (!CI || CI == ReplVal || &F != &Caller)
1008         return false;
1009       assert(CI->getCaller() == &F && "Unexpected call!");
1010 
1011       auto Remark = [&](OptimizationRemark OR) {
1012         return OR << "OpenMP runtime call "
1013                   << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated";
1014       };
1015       emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeDeduplicated", Remark);
1016 
1017       CGUpdater.removeCallSite(*CI);
1018       CI->replaceAllUsesWith(ReplVal);
1019       CI->eraseFromParent();
1020       ++NumOpenMPRuntimeCallsDeduplicated;
1021       Changed = true;
1022       return true;
1023     };
1024     RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1025 
1026     return Changed;
1027   }
1028 
1029   /// Collect arguments that represent the global thread id in \p GTIdArgs.
1030   void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1031     // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1032     //       initialization. We could define an AbstractAttribute instead and
1033     //       run the Attributor here once it can be run as an SCC pass.
1034 
1035     // Helper to check the argument \p ArgNo at all call sites of \p F for
1036     // a GTId.
1037     auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1038       if (!F.hasLocalLinkage())
1039         return false;
1040       for (Use &U : F.uses()) {
1041         if (CallInst *CI = getCallIfRegularCall(U)) {
1042           Value *ArgOp = CI->getArgOperand(ArgNo);
1043           if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1044               getCallIfRegularCall(
1045                   *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1046             continue;
1047         }
1048         return false;
1049       }
1050       return true;
1051     };
1052 
1053     // Helper to identify uses of a GTId as GTId arguments.
1054     auto AddUserArgs = [&](Value &GTId) {
1055       for (Use &U : GTId.uses())
1056         if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1057           if (CI->isArgOperand(&U))
1058             if (Function *Callee = CI->getCalledFunction())
1059               if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1060                 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1061     };
1062 
1063     // The argument users of __kmpc_global_thread_num calls are GTIds.
1064     OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1065         OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1066 
1067     GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1068       if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1069         AddUserArgs(*CI);
1070       return false;
1071     });
1072 
1073     // Transitively search for more arguments by looking at the users of the
1074     // ones we know already. During the search the GTIdArgs vector is extended
1075     // so we cannot cache the size nor can we use a range based for.
1076     for (unsigned u = 0; u < GTIdArgs.size(); ++u)
1077       AddUserArgs(*GTIdArgs[u]);
1078   }
1079 
1080   /// Kernel (=GPU) optimizations and utility functions
1081   ///
1082   ///{{
1083 
1084   /// Check if \p F is a kernel, hence entry point for target offloading.
1085   bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }
1086 
1087   /// Cache to remember the unique kernel for a function.
1088   DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;
1089 
1090   /// Find the unique kernel that will execute \p F, if any.
1091   Kernel getUniqueKernelFor(Function &F);
1092 
1093   /// Find the unique kernel that will execute \p I, if any.
1094   Kernel getUniqueKernelFor(Instruction &I) {
1095     return getUniqueKernelFor(*I.getFunction());
1096   }
1097 
1098   /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in
1099   /// the cases we can avoid taking the address of a function.
1100   bool rewriteDeviceCodeStateMachine();
1101 
1102   ///
1103   ///}}
1104 
1105   /// Emit a remark generically
1106   ///
1107   /// This template function can be used to generically emit a remark. The
1108   /// RemarkKind should be one of the following:
1109   ///   - OptimizationRemark to indicate a successful optimization attempt
1110   ///   - OptimizationRemarkMissed to report a failed optimization attempt
1111   ///   - OptimizationRemarkAnalysis to provide additional information about an
1112   ///     optimization attempt
1113   ///
1114   /// The remark is built using a callback function provided by the caller that
1115   /// takes a RemarkKind as input and returns a RemarkKind.
1116   template <typename RemarkKind,
1117             typename RemarkCallBack = function_ref<RemarkKind(RemarkKind &&)>>
1118   void emitRemark(Instruction *Inst, StringRef RemarkName,
1119                   RemarkCallBack &&RemarkCB) const {
1120     Function *F = Inst->getParent()->getParent();
1121     auto &ORE = OREGetter(F);
1122 
1123     ORE.emit(
1124         [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, Inst)); });
1125   }
1126 
1127   /// Emit a remark on a function. Since only OptimizationRemark is supporting
1128   /// this, it can't be made generic.
1129   void
1130   emitRemarkOnFunction(Function *F, StringRef RemarkName,
1131                        function_ref<OptimizationRemark(OptimizationRemark &&)>
1132                            &&RemarkCB) const {
1133     auto &ORE = OREGetter(F);
1134 
1135     ORE.emit([&]() {
1136       return RemarkCB(OptimizationRemark(DEBUG_TYPE, RemarkName, F));
1137     });
1138   }
1139 
1140   /// The underlying module.
1141   Module &M;
1142 
1143   /// The SCC we are operating on.
1144   SmallVectorImpl<Function *> &SCC;
1145 
1146   /// Callback to update the call graph, the first argument is a removed call,
1147   /// the second an optional replacement call.
1148   CallGraphUpdater &CGUpdater;
1149 
1150   /// Callback to get an OptimizationRemarkEmitter from a Function *
1151   OptimizationRemarkGetter OREGetter;
1152 
1153   /// OpenMP-specific information cache. Also Used for Attributor runs.
1154   OMPInformationCache &OMPInfoCache;
1155 
1156   /// Attributor instance.
1157   Attributor &A;
1158 
1159   /// Helper function to run Attributor on SCC.
1160   bool runAttributor() {
1161     if (SCC.empty())
1162       return false;
1163 
1164     registerAAs();
1165 
1166     ChangeStatus Changed = A.run();
1167 
1168     LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
1169                       << " functions, result: " << Changed << ".\n");
1170 
1171     return Changed == ChangeStatus::CHANGED;
1172   }
1173 
1174   /// Populate the Attributor with abstract attribute opportunities in the
1175   /// function.
1176   void registerAAs() {
1177     if (SCC.empty())
1178       return;
1179 
1180     // Create CallSite AA for all Getters.
1181     for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
1182       auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];
1183 
1184       auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];
1185 
1186       auto CreateAA = [&](Use &U, Function &Caller) {
1187         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
1188         if (!CI)
1189           return false;
1190 
1191         auto &CB = cast<CallBase>(*CI);
1192 
1193         IRPosition CBPos = IRPosition::callsite_function(CB);
1194         A.getOrCreateAAFor<AAICVTracker>(CBPos);
1195         return false;
1196       };
1197 
1198       GetterRFI.foreachUse(SCC, CreateAA);
1199     }
1200   }
1201 };
1202 
1203 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
1204   if (!OMPInfoCache.ModuleSlice.count(&F))
1205     return nullptr;
1206 
1207   // Use a scope to keep the lifetime of the CachedKernel short.
1208   {
1209     Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
1210     if (CachedKernel)
1211       return *CachedKernel;
1212 
1213     // TODO: We should use an AA to create an (optimistic and callback
1214     //       call-aware) call graph. For now we stick to simple patterns that
1215     //       are less powerful, basically the worst fixpoint.
1216     if (isKernel(F)) {
1217       CachedKernel = Kernel(&F);
1218       return *CachedKernel;
1219     }
1220 
1221     CachedKernel = nullptr;
1222     if (!F.hasLocalLinkage())
1223       return nullptr;
1224   }
1225 
1226   auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
1227     if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
1228       // Allow use in equality comparisons.
1229       if (Cmp->isEquality())
1230         return getUniqueKernelFor(*Cmp);
1231       return nullptr;
1232     }
1233     if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
1234       // Allow direct calls.
1235       if (CB->isCallee(&U))
1236         return getUniqueKernelFor(*CB);
1237       // Allow the use in __kmpc_kernel_prepare_parallel calls.
1238       if (Function *Callee = CB->getCalledFunction())
1239         if (Callee->getName() == "__kmpc_kernel_prepare_parallel")
1240           return getUniqueKernelFor(*CB);
1241       return nullptr;
1242     }
1243     // Disallow every other use.
1244     return nullptr;
1245   };
1246 
1247   // TODO: In the future we want to track more than just a unique kernel.
1248   SmallPtrSet<Kernel, 2> PotentialKernels;
1249   OMPInformationCache::foreachUse(F, [&](const Use &U) {
1250     PotentialKernels.insert(GetUniqueKernelForUse(U));
1251   });
1252 
1253   Kernel K = nullptr;
1254   if (PotentialKernels.size() == 1)
1255     K = *PotentialKernels.begin();
1256 
1257   // Cache the result.
1258   UniqueKernelMap[&F] = K;
1259 
1260   return K;
1261 }
1262 
1263 bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
1264   OMPInformationCache::RuntimeFunctionInfo &KernelPrepareParallelRFI =
1265       OMPInfoCache.RFIs[OMPRTL___kmpc_kernel_prepare_parallel];
1266 
1267   bool Changed = false;
1268   if (!KernelPrepareParallelRFI)
1269     return Changed;
1270 
1271   for (Function *F : SCC) {
1272 
1273     // Check if the function is uses in a __kmpc_kernel_prepare_parallel call at
1274     // all.
1275     bool UnknownUse = false;
1276     bool KernelPrepareUse = false;
1277     unsigned NumDirectCalls = 0;
1278 
1279     SmallVector<Use *, 2> ToBeReplacedStateMachineUses;
1280     OMPInformationCache::foreachUse(*F, [&](Use &U) {
1281       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
1282         if (CB->isCallee(&U)) {
1283           ++NumDirectCalls;
1284           return;
1285         }
1286 
1287       if (isa<ICmpInst>(U.getUser())) {
1288         ToBeReplacedStateMachineUses.push_back(&U);
1289         return;
1290       }
1291       if (!KernelPrepareUse && OpenMPOpt::getCallIfRegularCall(
1292                                    *U.getUser(), &KernelPrepareParallelRFI)) {
1293         KernelPrepareUse = true;
1294         ToBeReplacedStateMachineUses.push_back(&U);
1295         return;
1296       }
1297       UnknownUse = true;
1298     });
1299 
1300     // Do not emit a remark if we haven't seen a __kmpc_kernel_prepare_parallel
1301     // use.
1302     if (!KernelPrepareUse)
1303       continue;
1304 
1305     {
1306       auto Remark = [&](OptimizationRemark OR) {
1307         return OR << "Found a parallel region that is called in a target "
1308                      "region but not part of a combined target construct nor "
1309                      "nesed inside a target construct without intermediate "
1310                      "code. This can lead to excessive register usage for "
1311                      "unrelated target regions in the same translation unit "
1312                      "due to spurious call edges assumed by ptxas.";
1313       };
1314       emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark);
1315     }
1316 
1317     // If this ever hits, we should investigate.
1318     // TODO: Checking the number of uses is not a necessary restriction and
1319     // should be lifted.
1320     if (UnknownUse || NumDirectCalls != 1 ||
1321         ToBeReplacedStateMachineUses.size() != 2) {
1322       {
1323         auto Remark = [&](OptimizationRemark OR) {
1324           return OR << "Parallel region is used in "
1325                     << (UnknownUse ? "unknown" : "unexpected")
1326                     << " ways; will not attempt to rewrite the state machine.";
1327         };
1328         emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark);
1329       }
1330       continue;
1331     }
1332 
1333     // Even if we have __kmpc_kernel_prepare_parallel calls, we (for now) give
1334     // up if the function is not called from a unique kernel.
1335     Kernel K = getUniqueKernelFor(*F);
1336     if (!K) {
1337       {
1338         auto Remark = [&](OptimizationRemark OR) {
1339           return OR << "Parallel region is not known to be called from a "
1340                        "unique single target region, maybe the surrounding "
1341                        "function has external linkage?; will not attempt to "
1342                        "rewrite the state machine use.";
1343         };
1344         emitRemarkOnFunction(F, "OpenMPParallelRegionInMultipleKernesl",
1345                              Remark);
1346       }
1347       continue;
1348     }
1349 
1350     // We now know F is a parallel body function called only from the kernel K.
1351     // We also identified the state machine uses in which we replace the
1352     // function pointer by a new global symbol for identification purposes. This
1353     // ensures only direct calls to the function are left.
1354 
1355     {
1356       auto RemarkParalleRegion = [&](OptimizationRemark OR) {
1357         return OR << "Specialize parallel region that is only reached from a "
1358                      "single target region to avoid spurious call edges and "
1359                      "excessive register usage in other target regions. "
1360                      "(parallel region ID: "
1361                   << ore::NV("OpenMPParallelRegion", F->getName())
1362                   << ", kernel ID: "
1363                   << ore::NV("OpenMPTargetRegion", K->getName()) << ")";
1364       };
1365       emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD",
1366                            RemarkParalleRegion);
1367       auto RemarkKernel = [&](OptimizationRemark OR) {
1368         return OR << "Target region containing the parallel region that is "
1369                      "specialized. (parallel region ID: "
1370                   << ore::NV("OpenMPParallelRegion", F->getName())
1371                   << ", kernel ID: "
1372                   << ore::NV("OpenMPTargetRegion", K->getName()) << ")";
1373       };
1374       emitRemarkOnFunction(K, "OpenMPParallelRegionInNonSPMD", RemarkKernel);
1375     }
1376 
1377     Module &M = *F->getParent();
1378     Type *Int8Ty = Type::getInt8Ty(M.getContext());
1379 
1380     auto *ID = new GlobalVariable(
1381         M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
1382         UndefValue::get(Int8Ty), F->getName() + ".ID");
1383 
1384     for (Use *U : ToBeReplacedStateMachineUses)
1385       U->set(ConstantExpr::getBitCast(ID, U->get()->getType()));
1386 
1387     ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
1388 
1389     Changed = true;
1390   }
1391 
1392   return Changed;
1393 }
1394 
1395 /// Abstract Attribute for tracking ICV values.
1396 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
1397   using Base = StateWrapper<BooleanState, AbstractAttribute>;
1398   AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
1399 
1400   void initialize(Attributor &A) override {
1401     Function *F = getAnchorScope();
1402     if (!F || !A.isFunctionIPOAmendable(*F))
1403       indicatePessimisticFixpoint();
1404   }
1405 
1406   /// Returns true if value is assumed to be tracked.
1407   bool isAssumedTracked() const { return getAssumed(); }
1408 
1409   /// Returns true if value is known to be tracked.
1410   bool isKnownTracked() const { return getAssumed(); }
1411 
1412   /// Create an abstract attribute biew for the position \p IRP.
1413   static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
1414 
1415   /// Return the value with which \p I can be replaced for specific \p ICV.
1416   virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
1417                                                 const Instruction *I,
1418                                                 Attributor &A) const {
1419     return None;
1420   }
1421 
1422   /// Return an assumed unique ICV value if a single candidate is found. If
1423   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1424   /// Optional::NoneType.
1425   virtual Optional<Value *>
1426   getUniqueReplacementValue(InternalControlVar ICV) const = 0;
1427 
1428   // Currently only nthreads is being tracked.
1429   // this array will only grow with time.
1430   InternalControlVar TrackableICVs[1] = {ICV_nthreads};
1431 
1432   /// See AbstractAttribute::getName()
1433   const std::string getName() const override { return "AAICVTracker"; }
1434 
1435   /// See AbstractAttribute::getIdAddr()
1436   const char *getIdAddr() const override { return &ID; }
1437 
1438   /// This function should return true if the type of the \p AA is AAICVTracker
1439   static bool classof(const AbstractAttribute *AA) {
1440     return (AA->getIdAddr() == &ID);
1441   }
1442 
1443   static const char ID;
1444 };
1445 
1446 struct AAICVTrackerFunction : public AAICVTracker {
1447   AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
1448       : AAICVTracker(IRP, A) {}
1449 
1450   // FIXME: come up with better string.
1451   const std::string getAsStr() const override { return "ICVTrackerFunction"; }
1452 
1453   // FIXME: come up with some stats.
1454   void trackStatistics() const override {}
1455 
1456   /// We don't manifest anything for this AA.
1457   ChangeStatus manifest(Attributor &A) override {
1458     return ChangeStatus::UNCHANGED;
1459   }
1460 
1461   // Map of ICV to their values at specific program point.
1462   EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
1463                   InternalControlVar::ICV___last>
1464       ICVReplacementValuesMap;
1465 
1466   ChangeStatus updateImpl(Attributor &A) override {
1467     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1468 
1469     Function *F = getAnchorScope();
1470 
1471     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
1472 
1473     for (InternalControlVar ICV : TrackableICVs) {
1474       auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
1475 
1476       auto &ValuesMap = ICVReplacementValuesMap[ICV];
1477       auto TrackValues = [&](Use &U, Function &) {
1478         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
1479         if (!CI)
1480           return false;
1481 
1482         // FIXME: handle setters with more that 1 arguments.
1483         /// Track new value.
1484         if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
1485           HasChanged = ChangeStatus::CHANGED;
1486 
1487         return false;
1488       };
1489 
1490       auto CallCheck = [&](Instruction &I) {
1491         Optional<Value *> ReplVal = getValueForCall(A, &I, ICV);
1492         if (ReplVal.hasValue() &&
1493             ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
1494           HasChanged = ChangeStatus::CHANGED;
1495 
1496         return true;
1497       };
1498 
1499       // Track all changes of an ICV.
1500       SetterRFI.foreachUse(TrackValues, F);
1501 
1502       A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
1503                                 /* CheckBBLivenessOnly */ true);
1504 
1505       /// TODO: Figure out a way to avoid adding entry in
1506       /// ICVReplacementValuesMap
1507       Instruction *Entry = &F->getEntryBlock().front();
1508       if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
1509         ValuesMap.insert(std::make_pair(Entry, nullptr));
1510     }
1511 
1512     return HasChanged;
1513   }
1514 
1515   /// Hepler to check if \p I is a call and get the value for it if it is
1516   /// unique.
1517   Optional<Value *> getValueForCall(Attributor &A, const Instruction *I,
1518                                     InternalControlVar &ICV) const {
1519 
1520     const auto *CB = dyn_cast<CallBase>(I);
1521     if (!CB)
1522       return None;
1523 
1524     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
1525     auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
1526     auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
1527     Function *CalledFunction = CB->getCalledFunction();
1528 
1529     // Indirect call, assume ICV changes.
1530     if (CalledFunction == nullptr)
1531       return nullptr;
1532     if (CalledFunction == GetterRFI.Declaration)
1533       return None;
1534     if (CalledFunction == SetterRFI.Declaration) {
1535       if (ICVReplacementValuesMap[ICV].count(I))
1536         return ICVReplacementValuesMap[ICV].lookup(I);
1537 
1538       return nullptr;
1539     }
1540 
1541     // Since we don't know, assume it changes the ICV.
1542     if (CalledFunction->isDeclaration())
1543       return nullptr;
1544 
1545     const auto &ICVTrackingAA =
1546         A.getAAFor<AAICVTracker>(*this, IRPosition::callsite_returned(*CB));
1547 
1548     if (ICVTrackingAA.isAssumedTracked())
1549       return ICVTrackingAA.getUniqueReplacementValue(ICV);
1550 
1551     // If we don't know, assume it changes.
1552     return nullptr;
1553   }
1554 
1555   // We don't check unique value for a function, so return None.
1556   Optional<Value *>
1557   getUniqueReplacementValue(InternalControlVar ICV) const override {
1558     return None;
1559   }
1560 
1561   /// Return the value with which \p I can be replaced for specific \p ICV.
1562   Optional<Value *> getReplacementValue(InternalControlVar ICV,
1563                                         const Instruction *I,
1564                                         Attributor &A) const override {
1565     const auto &ValuesMap = ICVReplacementValuesMap[ICV];
1566     if (ValuesMap.count(I))
1567       return ValuesMap.lookup(I);
1568 
1569     SmallVector<const Instruction *, 16> Worklist;
1570     SmallPtrSet<const Instruction *, 16> Visited;
1571     Worklist.push_back(I);
1572 
1573     Optional<Value *> ReplVal;
1574 
1575     while (!Worklist.empty()) {
1576       const Instruction *CurrInst = Worklist.pop_back_val();
1577       if (!Visited.insert(CurrInst).second)
1578         continue;
1579 
1580       const BasicBlock *CurrBB = CurrInst->getParent();
1581 
1582       // Go up and look for all potential setters/calls that might change the
1583       // ICV.
1584       while ((CurrInst = CurrInst->getPrevNode())) {
1585         if (ValuesMap.count(CurrInst)) {
1586           Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
1587           // Unknown value, track new.
1588           if (!ReplVal.hasValue()) {
1589             ReplVal = NewReplVal;
1590             break;
1591           }
1592 
1593           // If we found a new value, we can't know the icv value anymore.
1594           if (NewReplVal.hasValue())
1595             if (ReplVal != NewReplVal)
1596               return nullptr;
1597 
1598           break;
1599         }
1600 
1601         Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV);
1602         if (!NewReplVal.hasValue())
1603           continue;
1604 
1605         // Unknown value, track new.
1606         if (!ReplVal.hasValue()) {
1607           ReplVal = NewReplVal;
1608           break;
1609         }
1610 
1611         // if (NewReplVal.hasValue())
1612         // We found a new value, we can't know the icv value anymore.
1613         if (ReplVal != NewReplVal)
1614           return nullptr;
1615       }
1616 
1617       // If we are in the same BB and we have a value, we are done.
1618       if (CurrBB == I->getParent() && ReplVal.hasValue())
1619         return ReplVal;
1620 
1621       // Go through all predecessors and add terminators for analysis.
1622       for (const BasicBlock *Pred : predecessors(CurrBB))
1623         if (const Instruction *Terminator = Pred->getTerminator())
1624           Worklist.push_back(Terminator);
1625     }
1626 
1627     return ReplVal;
1628   }
1629 };
1630 
1631 struct AAICVTrackerFunctionReturned : AAICVTracker {
1632   AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
1633       : AAICVTracker(IRP, A) {}
1634 
1635   // FIXME: come up with better string.
1636   const std::string getAsStr() const override {
1637     return "ICVTrackerFunctionReturned";
1638   }
1639 
1640   // FIXME: come up with some stats.
1641   void trackStatistics() const override {}
1642 
1643   /// We don't manifest anything for this AA.
1644   ChangeStatus manifest(Attributor &A) override {
1645     return ChangeStatus::UNCHANGED;
1646   }
1647 
1648   // Map of ICV to their values at specific program point.
1649   EnumeratedArray<Optional<Value *>, InternalControlVar,
1650                   InternalControlVar::ICV___last>
1651       ICVReplacementValuesMap;
1652 
1653   /// Return the value with which \p I can be replaced for specific \p ICV.
1654   Optional<Value *>
1655   getUniqueReplacementValue(InternalControlVar ICV) const override {
1656     return ICVReplacementValuesMap[ICV];
1657   }
1658 
1659   ChangeStatus updateImpl(Attributor &A) override {
1660     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1661     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
1662         *this, IRPosition::function(*getAnchorScope()));
1663 
1664     if (!ICVTrackingAA.isAssumedTracked())
1665       return indicatePessimisticFixpoint();
1666 
1667     for (InternalControlVar ICV : TrackableICVs) {
1668       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
1669       Optional<Value *> UniqueICVValue;
1670 
1671       auto CheckReturnInst = [&](Instruction &I) {
1672         Optional<Value *> NewReplVal =
1673             ICVTrackingAA.getReplacementValue(ICV, &I, A);
1674 
1675         // If we found a second ICV value there is no unique returned value.
1676         if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal)
1677           return false;
1678 
1679         UniqueICVValue = NewReplVal;
1680 
1681         return true;
1682       };
1683 
1684       if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret},
1685                                      /* CheckBBLivenessOnly */ true))
1686         UniqueICVValue = nullptr;
1687 
1688       if (UniqueICVValue == ReplVal)
1689         continue;
1690 
1691       ReplVal = UniqueICVValue;
1692       Changed = ChangeStatus::CHANGED;
1693     }
1694 
1695     return Changed;
1696   }
1697 };
1698 
1699 struct AAICVTrackerCallSite : AAICVTracker {
1700   AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A)
1701       : AAICVTracker(IRP, A) {}
1702 
1703   void initialize(Attributor &A) override {
1704     Function *F = getAnchorScope();
1705     if (!F || !A.isFunctionIPOAmendable(*F))
1706       indicatePessimisticFixpoint();
1707 
1708     // We only initialize this AA for getters, so we need to know which ICV it
1709     // gets.
1710     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
1711     for (InternalControlVar ICV : TrackableICVs) {
1712       auto ICVInfo = OMPInfoCache.ICVs[ICV];
1713       auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter];
1714       if (Getter.Declaration == getAssociatedFunction()) {
1715         AssociatedICV = ICVInfo.Kind;
1716         return;
1717       }
1718     }
1719 
1720     /// Unknown ICV.
1721     indicatePessimisticFixpoint();
1722   }
1723 
1724   ChangeStatus manifest(Attributor &A) override {
1725     if (!ReplVal.hasValue() || !ReplVal.getValue())
1726       return ChangeStatus::UNCHANGED;
1727 
1728     A.changeValueAfterManifest(*getCtxI(), **ReplVal);
1729     A.deleteAfterManifest(*getCtxI());
1730 
1731     return ChangeStatus::CHANGED;
1732   }
1733 
1734   // FIXME: come up with better string.
1735   const std::string getAsStr() const override { return "ICVTrackerCallSite"; }
1736 
1737   // FIXME: come up with some stats.
1738   void trackStatistics() const override {}
1739 
1740   InternalControlVar AssociatedICV;
1741   Optional<Value *> ReplVal;
1742 
1743   ChangeStatus updateImpl(Attributor &A) override {
1744     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
1745         *this, IRPosition::function(*getAnchorScope()));
1746 
1747     // We don't have any information, so we assume it changes the ICV.
1748     if (!ICVTrackingAA.isAssumedTracked())
1749       return indicatePessimisticFixpoint();
1750 
1751     Optional<Value *> NewReplVal =
1752         ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A);
1753 
1754     if (ReplVal == NewReplVal)
1755       return ChangeStatus::UNCHANGED;
1756 
1757     ReplVal = NewReplVal;
1758     return ChangeStatus::CHANGED;
1759   }
1760 
1761   // Return the value with which associated value can be replaced for specific
1762   // \p ICV.
1763   Optional<Value *>
1764   getUniqueReplacementValue(InternalControlVar ICV) const override {
1765     return ReplVal;
1766   }
1767 };
1768 
1769 struct AAICVTrackerCallSiteReturned : AAICVTracker {
1770   AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A)
1771       : AAICVTracker(IRP, A) {}
1772 
1773   // FIXME: come up with better string.
1774   const std::string getAsStr() const override {
1775     return "ICVTrackerCallSiteReturned";
1776   }
1777 
1778   // FIXME: come up with some stats.
1779   void trackStatistics() const override {}
1780 
1781   /// We don't manifest anything for this AA.
1782   ChangeStatus manifest(Attributor &A) override {
1783     return ChangeStatus::UNCHANGED;
1784   }
1785 
1786   // Map of ICV to their values at specific program point.
1787   EnumeratedArray<Optional<Value *>, InternalControlVar,
1788                   InternalControlVar::ICV___last>
1789       ICVReplacementValuesMap;
1790 
1791   /// Return the value with which associated value can be replaced for specific
1792   /// \p ICV.
1793   Optional<Value *>
1794   getUniqueReplacementValue(InternalControlVar ICV) const override {
1795     return ICVReplacementValuesMap[ICV];
1796   }
1797 
1798   ChangeStatus updateImpl(Attributor &A) override {
1799     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1800     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
1801         *this, IRPosition::returned(*getAssociatedFunction()));
1802 
1803     // We don't have any information, so we assume it changes the ICV.
1804     if (!ICVTrackingAA.isAssumedTracked())
1805       return indicatePessimisticFixpoint();
1806 
1807     for (InternalControlVar ICV : TrackableICVs) {
1808       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
1809       Optional<Value *> NewReplVal =
1810           ICVTrackingAA.getUniqueReplacementValue(ICV);
1811 
1812       if (ReplVal == NewReplVal)
1813         continue;
1814 
1815       ReplVal = NewReplVal;
1816       Changed = ChangeStatus::CHANGED;
1817     }
1818     return Changed;
1819   }
1820 };
1821 } // namespace
1822 
1823 const char AAICVTracker::ID = 0;
1824 
1825 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
1826                                               Attributor &A) {
1827   AAICVTracker *AA = nullptr;
1828   switch (IRP.getPositionKind()) {
1829   case IRPosition::IRP_INVALID:
1830   case IRPosition::IRP_FLOAT:
1831   case IRPosition::IRP_ARGUMENT:
1832   case IRPosition::IRP_CALL_SITE_ARGUMENT:
1833     llvm_unreachable("ICVTracker can only be created for function position!");
1834   case IRPosition::IRP_RETURNED:
1835     AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
1836     break;
1837   case IRPosition::IRP_CALL_SITE_RETURNED:
1838     AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
1839     break;
1840   case IRPosition::IRP_CALL_SITE:
1841     AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
1842     break;
1843   case IRPosition::IRP_FUNCTION:
1844     AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
1845     break;
1846   }
1847 
1848   return *AA;
1849 }
1850 
1851 PreservedAnalyses OpenMPOptPass::run(LazyCallGraph::SCC &C,
1852                                      CGSCCAnalysisManager &AM,
1853                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
1854   if (!containsOpenMP(*C.begin()->getFunction().getParent(), OMPInModule))
1855     return PreservedAnalyses::all();
1856 
1857   if (DisableOpenMPOptimizations)
1858     return PreservedAnalyses::all();
1859 
1860   SmallVector<Function *, 16> SCC;
1861   // If there are kernels in the module, we have to run on all SCC's.
1862   bool SCCIsInteresting = !OMPInModule.getKernels().empty();
1863   for (LazyCallGraph::Node &N : C) {
1864     Function *Fn = &N.getFunction();
1865     SCC.push_back(Fn);
1866 
1867     // Do we already know that the SCC contains kernels,
1868     // or that OpenMP functions are called from this SCC?
1869     if (SCCIsInteresting)
1870       continue;
1871     // If not, let's check that.
1872     SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn);
1873   }
1874 
1875   if (!SCCIsInteresting || SCC.empty())
1876     return PreservedAnalyses::all();
1877 
1878   FunctionAnalysisManager &FAM =
1879       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
1880 
1881   AnalysisGetter AG(FAM);
1882 
1883   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
1884     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
1885   };
1886 
1887   CallGraphUpdater CGUpdater;
1888   CGUpdater.initialize(CG, C, AM, UR);
1889 
1890   SetVector<Function *> Functions(SCC.begin(), SCC.end());
1891   BumpPtrAllocator Allocator;
1892   OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
1893                                 /*CGSCC*/ Functions, OMPInModule.getKernels());
1894 
1895   Attributor A(Functions, InfoCache, CGUpdater);
1896 
1897   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
1898   bool Changed = OMPOpt.run();
1899   if (Changed)
1900     return PreservedAnalyses::none();
1901 
1902   return PreservedAnalyses::all();
1903 }
1904 
1905 namespace {
1906 
1907 struct OpenMPOptLegacyPass : public CallGraphSCCPass {
1908   CallGraphUpdater CGUpdater;
1909   OpenMPInModule OMPInModule;
1910   static char ID;
1911 
1912   OpenMPOptLegacyPass() : CallGraphSCCPass(ID) {
1913     initializeOpenMPOptLegacyPassPass(*PassRegistry::getPassRegistry());
1914   }
1915 
1916   void getAnalysisUsage(AnalysisUsage &AU) const override {
1917     CallGraphSCCPass::getAnalysisUsage(AU);
1918   }
1919 
1920   bool doInitialization(CallGraph &CG) override {
1921     // Disable the pass if there is no OpenMP (runtime call) in the module.
1922     containsOpenMP(CG.getModule(), OMPInModule);
1923     return false;
1924   }
1925 
1926   bool runOnSCC(CallGraphSCC &CGSCC) override {
1927     if (!containsOpenMP(CGSCC.getCallGraph().getModule(), OMPInModule))
1928       return false;
1929     if (DisableOpenMPOptimizations || skipSCC(CGSCC))
1930       return false;
1931 
1932     SmallVector<Function *, 16> SCC;
1933     // If there are kernels in the module, we have to run on all SCC's.
1934     bool SCCIsInteresting = !OMPInModule.getKernels().empty();
1935     for (CallGraphNode *CGN : CGSCC) {
1936       Function *Fn = CGN->getFunction();
1937       if (!Fn || Fn->isDeclaration())
1938         continue;
1939       SCC.push_back(Fn);
1940 
1941       // Do we already know that the SCC contains kernels,
1942       // or that OpenMP functions are called from this SCC?
1943       if (SCCIsInteresting)
1944         continue;
1945       // If not, let's check that.
1946       SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn);
1947     }
1948 
1949     if (!SCCIsInteresting || SCC.empty())
1950       return false;
1951 
1952     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
1953     CGUpdater.initialize(CG, CGSCC);
1954 
1955     // Maintain a map of functions to avoid rebuilding the ORE
1956     DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
1957     auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
1958       std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
1959       if (!ORE)
1960         ORE = std::make_unique<OptimizationRemarkEmitter>(F);
1961       return *ORE;
1962     };
1963 
1964     AnalysisGetter AG;
1965     SetVector<Function *> Functions(SCC.begin(), SCC.end());
1966     BumpPtrAllocator Allocator;
1967     OMPInformationCache InfoCache(
1968         *(Functions.back()->getParent()), AG, Allocator,
1969         /*CGSCC*/ Functions, OMPInModule.getKernels());
1970 
1971     Attributor A(Functions, InfoCache, CGUpdater);
1972 
1973     OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
1974     return OMPOpt.run();
1975   }
1976 
1977   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
1978 };
1979 
1980 } // end anonymous namespace
1981 
1982 void OpenMPInModule::identifyKernels(Module &M) {
1983 
1984   NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1985   if (!MD)
1986     return;
1987 
1988   for (auto *Op : MD->operands()) {
1989     if (Op->getNumOperands() < 2)
1990       continue;
1991     MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
1992     if (!KindID || KindID->getString() != "kernel")
1993       continue;
1994 
1995     Function *KernelFn =
1996         mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
1997     if (!KernelFn)
1998       continue;
1999 
2000     ++NumOpenMPTargetRegionKernels;
2001 
2002     Kernels.insert(KernelFn);
2003   }
2004 }
2005 
2006 bool llvm::omp::containsOpenMP(Module &M, OpenMPInModule &OMPInModule) {
2007   if (OMPInModule.isKnown())
2008     return OMPInModule;
2009 
2010   auto RecordFunctionsContainingUsesOf = [&](Function *F) {
2011     for (User *U : F->users())
2012       if (auto *I = dyn_cast<Instruction>(U))
2013         OMPInModule.FuncsWithOMPRuntimeCalls.insert(I->getFunction());
2014   };
2015 
2016   // MSVC doesn't like long if-else chains for some reason and instead just
2017   // issues an error. Work around it..
2018   do {
2019 #define OMP_RTL(_Enum, _Name, ...)                                             \
2020   if (Function *F = M.getFunction(_Name)) {                                    \
2021     RecordFunctionsContainingUsesOf(F);                                        \
2022     OMPInModule = true;                                                        \
2023   }
2024 #include "llvm/Frontend/OpenMP/OMPKinds.def"
2025   } while (false);
2026 
2027   // Identify kernels once. TODO: We should split the OMPInformationCache into a
2028   // module and an SCC part. The kernel information, among other things, could
2029   // go into the module part.
2030   if (OMPInModule.isKnown() && OMPInModule) {
2031     OMPInModule.identifyKernels(M);
2032     return true;
2033   }
2034 
2035   return OMPInModule = false;
2036 }
2037 
2038 char OpenMPOptLegacyPass::ID = 0;
2039 
2040 INITIALIZE_PASS_BEGIN(OpenMPOptLegacyPass, "openmpopt",
2041                       "OpenMP specific optimizations", false, false)
2042 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2043 INITIALIZE_PASS_END(OpenMPOptLegacyPass, "openmpopt",
2044                     "OpenMP specific optimizations", false, false)
2045 
2046 Pass *llvm::createOpenMPOptLegacyPass() { return new OpenMPOptLegacyPass(); }
2047