1 //===-- IPO/OpenMPOpt.cpp - Collection of OpenMP specific optimizations ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // OpenMP specific optimizations:
10 //
11 // - Deduplication of runtime calls, e.g., omp_get_thread_num.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/IPO/OpenMPOpt.h"
16 
17 #include "llvm/ADT/EnumeratedArray.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/CallGraph.h"
20 #include "llvm/Analysis/CallGraphSCCPass.h"
21 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
22 #include "llvm/Frontend/OpenMP/OMPConstants.h"
23 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
24 #include "llvm/InitializePasses.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Transforms/IPO.h"
27 #include "llvm/Transforms/IPO/Attributor.h"
28 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 
31 using namespace llvm;
32 using namespace omp;
33 
34 #define DEBUG_TYPE "openmp-opt"
35 
36 static cl::opt<bool> DisableOpenMPOptimizations(
37     "openmp-opt-disable", cl::ZeroOrMore,
38     cl::desc("Disable OpenMP specific optimizations."), cl::Hidden,
39     cl::init(false));
40 
41 static cl::opt<bool> PrintICVValues("openmp-print-icv-values", cl::init(false),
42                                     cl::Hidden);
43 static cl::opt<bool> PrintOpenMPKernels("openmp-print-gpu-kernels",
44                                         cl::init(false), cl::Hidden);
45 
46 static cl::opt<bool> HideMemoryTransferLatency(
47     "openmp-hide-memory-transfer-latency",
48     cl::desc("[WIP] Tries to hide the latency of host to device memory"
49              " transfers"),
50     cl::Hidden, cl::init(false));
51 
52 
53 STATISTIC(NumOpenMPRuntimeCallsDeduplicated,
54           "Number of OpenMP runtime calls deduplicated");
55 STATISTIC(NumOpenMPParallelRegionsDeleted,
56           "Number of OpenMP parallel regions deleted");
57 STATISTIC(NumOpenMPRuntimeFunctionsIdentified,
58           "Number of OpenMP runtime functions identified");
59 STATISTIC(NumOpenMPRuntimeFunctionUsesIdentified,
60           "Number of OpenMP runtime function uses identified");
61 STATISTIC(NumOpenMPTargetRegionKernels,
62           "Number of OpenMP target region entry points (=kernels) identified");
63 STATISTIC(
64     NumOpenMPParallelRegionsReplacedInGPUStateMachine,
65     "Number of OpenMP parallel regions replaced with ID in GPU state machines");
66 
67 #if !defined(NDEBUG)
68 static constexpr auto TAG = "[" DEBUG_TYPE "]";
69 #endif
70 
71 namespace {
72 
73 struct AAICVTracker;
74 
75 /// OpenMP specific information. For now, stores RFIs and ICVs also needed for
76 /// Attributor runs.
77 struct OMPInformationCache : public InformationCache {
78   OMPInformationCache(Module &M, AnalysisGetter &AG,
79                       BumpPtrAllocator &Allocator, SetVector<Function *> &CGSCC,
80                       SmallPtrSetImpl<Kernel> &Kernels)
81       : InformationCache(M, AG, Allocator, &CGSCC), OMPBuilder(M),
82         Kernels(Kernels) {
83 
84     OMPBuilder.initialize();
85     initializeRuntimeFunctions();
86     initializeInternalControlVars();
87   }
88 
89   /// Generic information that describes an internal control variable.
90   struct InternalControlVarInfo {
91     /// The kind, as described by InternalControlVar enum.
92     InternalControlVar Kind;
93 
94     /// The name of the ICV.
95     StringRef Name;
96 
97     /// Environment variable associated with this ICV.
98     StringRef EnvVarName;
99 
100     /// Initial value kind.
101     ICVInitValue InitKind;
102 
103     /// Initial value.
104     ConstantInt *InitValue;
105 
106     /// Setter RTL function associated with this ICV.
107     RuntimeFunction Setter;
108 
109     /// Getter RTL function associated with this ICV.
110     RuntimeFunction Getter;
111 
112     /// RTL Function corresponding to the override clause of this ICV
113     RuntimeFunction Clause;
114   };
115 
116   /// Generic information that describes a runtime function
117   struct RuntimeFunctionInfo {
118 
119     /// The kind, as described by the RuntimeFunction enum.
120     RuntimeFunction Kind;
121 
122     /// The name of the function.
123     StringRef Name;
124 
125     /// Flag to indicate a variadic function.
126     bool IsVarArg;
127 
128     /// The return type of the function.
129     Type *ReturnType;
130 
131     /// The argument types of the function.
132     SmallVector<Type *, 8> ArgumentTypes;
133 
134     /// The declaration if available.
135     Function *Declaration = nullptr;
136 
137     /// Uses of this runtime function per function containing the use.
138     using UseVector = SmallVector<Use *, 16>;
139 
140     /// Clear UsesMap for runtime function.
141     void clearUsesMap() { UsesMap.clear(); }
142 
143     /// Boolean conversion that is true if the runtime function was found.
144     operator bool() const { return Declaration; }
145 
146     /// Return the vector of uses in function \p F.
147     UseVector &getOrCreateUseVector(Function *F) {
148       std::shared_ptr<UseVector> &UV = UsesMap[F];
149       if (!UV)
150         UV = std::make_shared<UseVector>();
151       return *UV;
152     }
153 
154     /// Return the vector of uses in function \p F or `nullptr` if there are
155     /// none.
156     const UseVector *getUseVector(Function &F) const {
157       auto I = UsesMap.find(&F);
158       if (I != UsesMap.end())
159         return I->second.get();
160       return nullptr;
161     }
162 
163     /// Return how many functions contain uses of this runtime function.
164     size_t getNumFunctionsWithUses() const { return UsesMap.size(); }
165 
166     /// Return the number of arguments (or the minimal number for variadic
167     /// functions).
168     size_t getNumArgs() const { return ArgumentTypes.size(); }
169 
170     /// Run the callback \p CB on each use and forget the use if the result is
171     /// true. The callback will be fed the function in which the use was
172     /// encountered as second argument.
173     void foreachUse(SmallVectorImpl<Function *> &SCC,
174                     function_ref<bool(Use &, Function &)> CB) {
175       for (Function *F : SCC)
176         foreachUse(CB, F);
177     }
178 
179     /// Run the callback \p CB on each use within the function \p F and forget
180     /// the use if the result is true.
181     void foreachUse(function_ref<bool(Use &, Function &)> CB, Function *F) {
182       SmallVector<unsigned, 8> ToBeDeleted;
183       ToBeDeleted.clear();
184 
185       unsigned Idx = 0;
186       UseVector &UV = getOrCreateUseVector(F);
187 
188       for (Use *U : UV) {
189         if (CB(*U, *F))
190           ToBeDeleted.push_back(Idx);
191         ++Idx;
192       }
193 
194       // Remove the to-be-deleted indices in reverse order as prior
195       // modifications will not modify the smaller indices.
196       while (!ToBeDeleted.empty()) {
197         unsigned Idx = ToBeDeleted.pop_back_val();
198         UV[Idx] = UV.back();
199         UV.pop_back();
200       }
201     }
202 
203   private:
204     /// Map from functions to all uses of this runtime function contained in
205     /// them.
206     DenseMap<Function *, std::shared_ptr<UseVector>> UsesMap;
207   };
208 
209   /// An OpenMP-IR-Builder instance
210   OpenMPIRBuilder OMPBuilder;
211 
212   /// Map from runtime function kind to the runtime function description.
213   EnumeratedArray<RuntimeFunctionInfo, RuntimeFunction,
214                   RuntimeFunction::OMPRTL___last>
215       RFIs;
216 
217   /// Map from ICV kind to the ICV description.
218   EnumeratedArray<InternalControlVarInfo, InternalControlVar,
219                   InternalControlVar::ICV___last>
220       ICVs;
221 
222   /// Helper to initialize all internal control variable information for those
223   /// defined in OMPKinds.def.
224   void initializeInternalControlVars() {
225 #define ICV_RT_SET(_Name, RTL)                                                 \
226   {                                                                            \
227     auto &ICV = ICVs[_Name];                                                   \
228     ICV.Setter = RTL;                                                          \
229   }
230 #define ICV_RT_GET(Name, RTL)                                                  \
231   {                                                                            \
232     auto &ICV = ICVs[Name];                                                    \
233     ICV.Getter = RTL;                                                          \
234   }
235 #define ICV_DATA_ENV(Enum, _Name, _EnvVarName, Init)                           \
236   {                                                                            \
237     auto &ICV = ICVs[Enum];                                                    \
238     ICV.Name = _Name;                                                          \
239     ICV.Kind = Enum;                                                           \
240     ICV.InitKind = Init;                                                       \
241     ICV.EnvVarName = _EnvVarName;                                              \
242     switch (ICV.InitKind) {                                                    \
243     case ICV_IMPLEMENTATION_DEFINED:                                           \
244       ICV.InitValue = nullptr;                                                 \
245       break;                                                                   \
246     case ICV_ZERO:                                                             \
247       ICV.InitValue = ConstantInt::get(                                        \
248           Type::getInt32Ty(OMPBuilder.Int32->getContext()), 0);                \
249       break;                                                                   \
250     case ICV_FALSE:                                                            \
251       ICV.InitValue = ConstantInt::getFalse(OMPBuilder.Int1->getContext());    \
252       break;                                                                   \
253     case ICV_LAST:                                                             \
254       break;                                                                   \
255     }                                                                          \
256   }
257 #include "llvm/Frontend/OpenMP/OMPKinds.def"
258   }
259 
260   /// Returns true if the function declaration \p F matches the runtime
261   /// function types, that is, return type \p RTFRetType, and argument types
262   /// \p RTFArgTypes.
263   static bool declMatchesRTFTypes(Function *F, Type *RTFRetType,
264                                   SmallVector<Type *, 8> &RTFArgTypes) {
265     // TODO: We should output information to the user (under debug output
266     //       and via remarks).
267 
268     if (!F)
269       return false;
270     if (F->getReturnType() != RTFRetType)
271       return false;
272     if (F->arg_size() != RTFArgTypes.size())
273       return false;
274 
275     auto RTFTyIt = RTFArgTypes.begin();
276     for (Argument &Arg : F->args()) {
277       if (Arg.getType() != *RTFTyIt)
278         return false;
279 
280       ++RTFTyIt;
281     }
282 
283     return true;
284   }
285 
286   // Helper to collect all uses of the declaration in the UsesMap.
287   unsigned collectUses(RuntimeFunctionInfo &RFI, bool CollectStats = true) {
288     unsigned NumUses = 0;
289     if (!RFI.Declaration)
290       return NumUses;
291     OMPBuilder.addAttributes(RFI.Kind, *RFI.Declaration);
292 
293     if (CollectStats) {
294       NumOpenMPRuntimeFunctionsIdentified += 1;
295       NumOpenMPRuntimeFunctionUsesIdentified += RFI.Declaration->getNumUses();
296     }
297 
298     // TODO: We directly convert uses into proper calls and unknown uses.
299     for (Use &U : RFI.Declaration->uses()) {
300       if (Instruction *UserI = dyn_cast<Instruction>(U.getUser())) {
301         if (ModuleSlice.count(UserI->getFunction())) {
302           RFI.getOrCreateUseVector(UserI->getFunction()).push_back(&U);
303           ++NumUses;
304         }
305       } else {
306         RFI.getOrCreateUseVector(nullptr).push_back(&U);
307         ++NumUses;
308       }
309     }
310     return NumUses;
311   }
312 
313   // Helper function to recollect uses of all runtime functions.
314   void recollectUses() {
315     for (int Idx = 0; Idx < RFIs.size(); ++Idx) {
316       auto &RFI = RFIs[static_cast<RuntimeFunction>(Idx)];
317       RFI.clearUsesMap();
318       collectUses(RFI, /*CollectStats*/ false);
319     }
320   }
321 
322   /// Helper to initialize all runtime function information for those defined
323   /// in OpenMPKinds.def.
324   void initializeRuntimeFunctions() {
325     Module &M = *((*ModuleSlice.begin())->getParent());
326 
327     // Helper macros for handling __VA_ARGS__ in OMP_RTL
328 #define OMP_TYPE(VarName, ...)                                                 \
329   Type *VarName = OMPBuilder.VarName;                                          \
330   (void)VarName;
331 
332 #define OMP_ARRAY_TYPE(VarName, ...)                                           \
333   ArrayType *VarName##Ty = OMPBuilder.VarName##Ty;                             \
334   (void)VarName##Ty;                                                           \
335   PointerType *VarName##PtrTy = OMPBuilder.VarName##PtrTy;                     \
336   (void)VarName##PtrTy;
337 
338 #define OMP_FUNCTION_TYPE(VarName, ...)                                        \
339   FunctionType *VarName = OMPBuilder.VarName;                                  \
340   (void)VarName;                                                               \
341   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
342   (void)VarName##Ptr;
343 
344 #define OMP_STRUCT_TYPE(VarName, ...)                                          \
345   StructType *VarName = OMPBuilder.VarName;                                    \
346   (void)VarName;                                                               \
347   PointerType *VarName##Ptr = OMPBuilder.VarName##Ptr;                         \
348   (void)VarName##Ptr;
349 
350 #define OMP_RTL(_Enum, _Name, _IsVarArg, _ReturnType, ...)                     \
351   {                                                                            \
352     SmallVector<Type *, 8> ArgsTypes({__VA_ARGS__});                           \
353     Function *F = M.getFunction(_Name);                                        \
354     if (declMatchesRTFTypes(F, OMPBuilder._ReturnType, ArgsTypes)) {           \
355       auto &RFI = RFIs[_Enum];                                                 \
356       RFI.Kind = _Enum;                                                        \
357       RFI.Name = _Name;                                                        \
358       RFI.IsVarArg = _IsVarArg;                                                \
359       RFI.ReturnType = OMPBuilder._ReturnType;                                 \
360       RFI.ArgumentTypes = std::move(ArgsTypes);                                \
361       RFI.Declaration = F;                                                     \
362       unsigned NumUses = collectUses(RFI);                                     \
363       (void)NumUses;                                                           \
364       LLVM_DEBUG({                                                             \
365         dbgs() << TAG << RFI.Name << (RFI.Declaration ? "" : " not")           \
366                << " found\n";                                                  \
367         if (RFI.Declaration)                                                   \
368           dbgs() << TAG << "-> got " << NumUses << " uses in "                 \
369                  << RFI.getNumFunctionsWithUses()                              \
370                  << " different functions.\n";                                 \
371       });                                                                      \
372     }                                                                          \
373   }
374 #include "llvm/Frontend/OpenMP/OMPKinds.def"
375 
376     // TODO: We should attach the attributes defined in OMPKinds.def.
377   }
378 
379   /// Collection of known kernels (\see Kernel) in the module.
380   SmallPtrSetImpl<Kernel> &Kernels;
381 };
382 
383 /// Used to map the values physically (in the IR) stored in an offload
384 /// array, to a vector in memory.
385 struct OffloadArray {
386   /// Physical array (in the IR).
387   AllocaInst *Array = nullptr;
388   /// Mapped values.
389   SmallVector<Value *, 8> StoredValues;
390   /// Last stores made in the offload array.
391   SmallVector<StoreInst *, 8> LastAccesses;
392 
393   OffloadArray() = default;
394 
395   /// Initializes the OffloadArray with the values stored in \p Array before
396   /// instruction \p Before is reached. Returns false if the initialization
397   /// fails.
398   /// This MUST be used immediately after the construction of the object.
399   bool initialize(AllocaInst &Array, Instruction &Before) {
400     if (!Array.getAllocatedType()->isArrayTy())
401       return false;
402 
403     if (!getValues(Array, Before))
404       return false;
405 
406     this->Array = &Array;
407     return true;
408   }
409 
410   static const unsigned BasePtrsArgNum = 2;
411   static const unsigned PtrsArgNum = 3;
412   static const unsigned SizesArgNum = 4;
413 
414 private:
415   /// Traverses the BasicBlock where \p Array is, collecting the stores made to
416   /// \p Array, leaving StoredValues with the values stored before the
417   /// instruction \p Before is reached.
418   bool getValues(AllocaInst &Array, Instruction &Before) {
419     // Initialize container.
420     const uint64_t NumValues =
421         Array.getAllocatedType()->getArrayNumElements();
422     StoredValues.assign(NumValues, nullptr);
423     LastAccesses.assign(NumValues, nullptr);
424 
425     // TODO: This assumes the instruction \p Before is in the same
426     //  BasicBlock as Array. Make it general, for any control flow graph.
427     BasicBlock *BB = Array.getParent();
428     if (BB != Before.getParent())
429       return false;
430 
431     const DataLayout &DL = Array.getModule()->getDataLayout();
432     const unsigned int PointerSize = DL.getPointerSize();
433 
434     for (Instruction &I : *BB) {
435       if (&I == &Before)
436         break;
437 
438       if (!isa<StoreInst>(&I))
439         continue;
440 
441       auto *S = cast<StoreInst>(&I);
442       int64_t Offset = -1;
443       auto *Dst = GetPointerBaseWithConstantOffset(S->getPointerOperand(),
444                                                    Offset, DL);
445       if (Dst == &Array) {
446         int64_t Idx = Offset / PointerSize;
447         StoredValues[Idx] = getUnderlyingObject(S->getValueOperand());
448         LastAccesses[Idx] = S;
449       }
450     }
451 
452     return isFilled();
453   }
454 
455   /// Returns true if all values in StoredValues and
456   /// LastAccesses are not nullptrs.
457   bool isFilled() {
458     const unsigned NumValues = StoredValues.size();
459     for (unsigned I = 0; I < NumValues; ++I) {
460       if (!StoredValues[I] || !LastAccesses[I])
461         return false;
462     }
463 
464     return true;
465   }
466 };
467 
468 struct OpenMPOpt {
469 
470   using OptimizationRemarkGetter =
471       function_ref<OptimizationRemarkEmitter &(Function *)>;
472 
473   OpenMPOpt(SmallVectorImpl<Function *> &SCC, CallGraphUpdater &CGUpdater,
474             OptimizationRemarkGetter OREGetter,
475             OMPInformationCache &OMPInfoCache, Attributor &A)
476       : M(*(*SCC.begin())->getParent()), SCC(SCC), CGUpdater(CGUpdater),
477         OREGetter(OREGetter), OMPInfoCache(OMPInfoCache), A(A) {}
478 
479   /// Check if any remarks are enabled for openmp-opt
480   bool remarksEnabled() {
481     auto &Ctx = M.getContext();
482     return Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(DEBUG_TYPE);
483   }
484 
485   /// Run all OpenMP optimizations on the underlying SCC/ModuleSlice.
486   bool run() {
487     if (SCC.empty())
488       return false;
489 
490     bool Changed = false;
491 
492     LLVM_DEBUG(dbgs() << TAG << "Run on SCC with " << SCC.size()
493                       << " functions in a slice with "
494                       << OMPInfoCache.ModuleSlice.size() << " functions\n");
495 
496     if (PrintICVValues)
497       printICVs();
498     if (PrintOpenMPKernels)
499       printKernels();
500 
501     Changed |= rewriteDeviceCodeStateMachine();
502 
503     Changed |= runAttributor();
504 
505     // Recollect uses, in case Attributor deleted any.
506     OMPInfoCache.recollectUses();
507 
508     Changed |= deduplicateRuntimeCalls();
509     Changed |= deleteParallelRegions();
510     if (HideMemoryTransferLatency)
511       Changed |= hideMemTransfersLatency();
512     if (remarksEnabled())
513       analysisGlobalization();
514 
515     return Changed;
516   }
517 
518   /// Print initial ICV values for testing.
519   /// FIXME: This should be done from the Attributor once it is added.
520   void printICVs() const {
521     InternalControlVar ICVs[] = {ICV_nthreads, ICV_active_levels, ICV_cancel,
522                                  ICV_proc_bind};
523 
524     for (Function *F : OMPInfoCache.ModuleSlice) {
525       for (auto ICV : ICVs) {
526         auto ICVInfo = OMPInfoCache.ICVs[ICV];
527         auto Remark = [&](OptimizationRemark OR) {
528           return OR << "OpenMP ICV " << ore::NV("OpenMPICV", ICVInfo.Name)
529                     << " Value: "
530                     << (ICVInfo.InitValue
531                             ? ICVInfo.InitValue->getValue().toString(10, true)
532                             : "IMPLEMENTATION_DEFINED");
533         };
534 
535         emitRemarkOnFunction(F, "OpenMPICVTracker", Remark);
536       }
537     }
538   }
539 
540   /// Print OpenMP GPU kernels for testing.
541   void printKernels() const {
542     for (Function *F : SCC) {
543       if (!OMPInfoCache.Kernels.count(F))
544         continue;
545 
546       auto Remark = [&](OptimizationRemark OR) {
547         return OR << "OpenMP GPU kernel "
548                   << ore::NV("OpenMPGPUKernel", F->getName()) << "\n";
549       };
550 
551       emitRemarkOnFunction(F, "OpenMPGPU", Remark);
552     }
553   }
554 
555   /// Return the call if \p U is a callee use in a regular call. If \p RFI is
556   /// given it has to be the callee or a nullptr is returned.
557   static CallInst *getCallIfRegularCall(
558       Use &U, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
559     CallInst *CI = dyn_cast<CallInst>(U.getUser());
560     if (CI && CI->isCallee(&U) && !CI->hasOperandBundles() &&
561         (!RFI || CI->getCalledFunction() == RFI->Declaration))
562       return CI;
563     return nullptr;
564   }
565 
566   /// Return the call if \p V is a regular call. If \p RFI is given it has to be
567   /// the callee or a nullptr is returned.
568   static CallInst *getCallIfRegularCall(
569       Value &V, OMPInformationCache::RuntimeFunctionInfo *RFI = nullptr) {
570     CallInst *CI = dyn_cast<CallInst>(&V);
571     if (CI && !CI->hasOperandBundles() &&
572         (!RFI || CI->getCalledFunction() == RFI->Declaration))
573       return CI;
574     return nullptr;
575   }
576 
577 private:
578   /// Try to delete parallel regions if possible.
579   bool deleteParallelRegions() {
580     const unsigned CallbackCalleeOperand = 2;
581 
582     OMPInformationCache::RuntimeFunctionInfo &RFI =
583         OMPInfoCache.RFIs[OMPRTL___kmpc_fork_call];
584 
585     if (!RFI.Declaration)
586       return false;
587 
588     bool Changed = false;
589     auto DeleteCallCB = [&](Use &U, Function &) {
590       CallInst *CI = getCallIfRegularCall(U);
591       if (!CI)
592         return false;
593       auto *Fn = dyn_cast<Function>(
594           CI->getArgOperand(CallbackCalleeOperand)->stripPointerCasts());
595       if (!Fn)
596         return false;
597       if (!Fn->onlyReadsMemory())
598         return false;
599       if (!Fn->hasFnAttribute(Attribute::WillReturn))
600         return false;
601 
602       LLVM_DEBUG(dbgs() << TAG << "Delete read-only parallel region in "
603                         << CI->getCaller()->getName() << "\n");
604 
605       auto Remark = [&](OptimizationRemark OR) {
606         return OR << "Parallel region in "
607                   << ore::NV("OpenMPParallelDelete", CI->getCaller()->getName())
608                   << " deleted";
609       };
610       emitRemark<OptimizationRemark>(CI, "OpenMPParallelRegionDeletion",
611                                      Remark);
612 
613       CGUpdater.removeCallSite(*CI);
614       CI->eraseFromParent();
615       Changed = true;
616       ++NumOpenMPParallelRegionsDeleted;
617       return true;
618     };
619 
620     RFI.foreachUse(SCC, DeleteCallCB);
621 
622     return Changed;
623   }
624 
625   /// Try to eliminate runtime calls by reusing existing ones.
626   bool deduplicateRuntimeCalls() {
627     bool Changed = false;
628 
629     RuntimeFunction DeduplicableRuntimeCallIDs[] = {
630         OMPRTL_omp_get_num_threads,
631         OMPRTL_omp_in_parallel,
632         OMPRTL_omp_get_cancellation,
633         OMPRTL_omp_get_thread_limit,
634         OMPRTL_omp_get_supported_active_levels,
635         OMPRTL_omp_get_level,
636         OMPRTL_omp_get_ancestor_thread_num,
637         OMPRTL_omp_get_team_size,
638         OMPRTL_omp_get_active_level,
639         OMPRTL_omp_in_final,
640         OMPRTL_omp_get_proc_bind,
641         OMPRTL_omp_get_num_places,
642         OMPRTL_omp_get_num_procs,
643         OMPRTL_omp_get_place_num,
644         OMPRTL_omp_get_partition_num_places,
645         OMPRTL_omp_get_partition_place_nums};
646 
647     // Global-tid is handled separately.
648     SmallSetVector<Value *, 16> GTIdArgs;
649     collectGlobalThreadIdArguments(GTIdArgs);
650     LLVM_DEBUG(dbgs() << TAG << "Found " << GTIdArgs.size()
651                       << " global thread ID arguments\n");
652 
653     for (Function *F : SCC) {
654       for (auto DeduplicableRuntimeCallID : DeduplicableRuntimeCallIDs)
655         Changed |= deduplicateRuntimeCalls(
656             *F, OMPInfoCache.RFIs[DeduplicableRuntimeCallID]);
657 
658       // __kmpc_global_thread_num is special as we can replace it with an
659       // argument in enough cases to make it worth trying.
660       Value *GTIdArg = nullptr;
661       for (Argument &Arg : F->args())
662         if (GTIdArgs.count(&Arg)) {
663           GTIdArg = &Arg;
664           break;
665         }
666       Changed |= deduplicateRuntimeCalls(
667           *F, OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num], GTIdArg);
668     }
669 
670     return Changed;
671   }
672 
673   /// Tries to hide the latency of runtime calls that involve host to
674   /// device memory transfers by splitting them into their "issue" and "wait"
675   /// versions. The "issue" is moved upwards as much as possible. The "wait" is
676   /// moved downards as much as possible. The "issue" issues the memory transfer
677   /// asynchronously, returning a handle. The "wait" waits in the returned
678   /// handle for the memory transfer to finish.
679   bool hideMemTransfersLatency() {
680     auto &RFI = OMPInfoCache.RFIs[OMPRTL___tgt_target_data_begin_mapper];
681     bool Changed = false;
682     auto SplitMemTransfers = [&](Use &U, Function &Decl) {
683       auto *RTCall = getCallIfRegularCall(U, &RFI);
684       if (!RTCall)
685         return false;
686 
687       OffloadArray OffloadArrays[3];
688       if (!getValuesInOffloadArrays(*RTCall, OffloadArrays))
689         return false;
690 
691       LLVM_DEBUG(dumpValuesInOffloadArrays(OffloadArrays));
692 
693       // TODO: Check if can be moved upwards.
694       bool WasSplit = false;
695       Instruction *WaitMovementPoint = canBeMovedDownwards(*RTCall);
696       if (WaitMovementPoint)
697         WasSplit = splitTargetDataBeginRTC(*RTCall, *WaitMovementPoint);
698 
699       Changed |= WasSplit;
700       return WasSplit;
701     };
702     RFI.foreachUse(SCC, SplitMemTransfers);
703 
704     return Changed;
705   }
706 
707   void analysisGlobalization() {
708     auto &RFI =
709         OMPInfoCache.RFIs[OMPRTL___kmpc_data_sharing_coalesced_push_stack];
710 
711     auto checkGlobalization = [&](Use &U, Function &Decl) {
712       if (CallInst *CI = getCallIfRegularCall(U, &RFI)) {
713         auto Remark = [&](OptimizationRemarkAnalysis ORA) {
714           return ORA
715                  << "Found thread data sharing on the GPU. "
716                  << "Expect degraded performance due to data globalization.";
717         };
718         emitRemark<OptimizationRemarkAnalysis>(CI, "OpenMPGlobalization",
719                                                Remark);
720       }
721 
722       return false;
723     };
724 
725     RFI.foreachUse(SCC, checkGlobalization);
726     return;
727   }
728 
729   /// Maps the values stored in the offload arrays passed as arguments to
730   /// \p RuntimeCall into the offload arrays in \p OAs.
731   bool getValuesInOffloadArrays(CallInst &RuntimeCall,
732                                 MutableArrayRef<OffloadArray> OAs) {
733     assert(OAs.size() == 3 && "Need space for three offload arrays!");
734 
735     // A runtime call that involves memory offloading looks something like:
736     // call void @__tgt_target_data_begin_mapper(arg0, arg1,
737     //   i8** %offload_baseptrs, i8** %offload_ptrs, i64* %offload_sizes,
738     // ...)
739     // So, the idea is to access the allocas that allocate space for these
740     // offload arrays, offload_baseptrs, offload_ptrs, offload_sizes.
741     // Therefore:
742     // i8** %offload_baseptrs.
743     Value *BasePtrsArg =
744         RuntimeCall.getArgOperand(OffloadArray::BasePtrsArgNum);
745     // i8** %offload_ptrs.
746     Value *PtrsArg = RuntimeCall.getArgOperand(OffloadArray::PtrsArgNum);
747     // i8** %offload_sizes.
748     Value *SizesArg = RuntimeCall.getArgOperand(OffloadArray::SizesArgNum);
749 
750     // Get values stored in **offload_baseptrs.
751     auto *V = getUnderlyingObject(BasePtrsArg);
752     if (!isa<AllocaInst>(V))
753       return false;
754     auto *BasePtrsArray = cast<AllocaInst>(V);
755     if (!OAs[0].initialize(*BasePtrsArray, RuntimeCall))
756       return false;
757 
758     // Get values stored in **offload_baseptrs.
759     V = getUnderlyingObject(PtrsArg);
760     if (!isa<AllocaInst>(V))
761       return false;
762     auto *PtrsArray = cast<AllocaInst>(V);
763     if (!OAs[1].initialize(*PtrsArray, RuntimeCall))
764       return false;
765 
766     // Get values stored in **offload_sizes.
767     V = getUnderlyingObject(SizesArg);
768     // If it's a [constant] global array don't analyze it.
769     if (isa<GlobalValue>(V))
770       return isa<Constant>(V);
771     if (!isa<AllocaInst>(V))
772       return false;
773 
774     auto *SizesArray = cast<AllocaInst>(V);
775     if (!OAs[2].initialize(*SizesArray, RuntimeCall))
776       return false;
777 
778     return true;
779   }
780 
781   /// Prints the values in the OffloadArrays \p OAs using LLVM_DEBUG.
782   /// For now this is a way to test that the function getValuesInOffloadArrays
783   /// is working properly.
784   /// TODO: Move this to a unittest when unittests are available for OpenMPOpt.
785   void dumpValuesInOffloadArrays(ArrayRef<OffloadArray> OAs) {
786     assert(OAs.size() == 3 && "There are three offload arrays to debug!");
787 
788     LLVM_DEBUG(dbgs() << TAG << " Successfully got offload values:\n");
789     std::string ValuesStr;
790     raw_string_ostream Printer(ValuesStr);
791     std::string Separator = " --- ";
792 
793     for (auto *BP : OAs[0].StoredValues) {
794       BP->print(Printer);
795       Printer << Separator;
796     }
797     LLVM_DEBUG(dbgs() << "\t\toffload_baseptrs: " << Printer.str() << "\n");
798     ValuesStr.clear();
799 
800     for (auto *P : OAs[1].StoredValues) {
801       P->print(Printer);
802       Printer << Separator;
803     }
804     LLVM_DEBUG(dbgs() << "\t\toffload_ptrs: " << Printer.str() << "\n");
805     ValuesStr.clear();
806 
807     for (auto *S : OAs[2].StoredValues) {
808       S->print(Printer);
809       Printer << Separator;
810     }
811     LLVM_DEBUG(dbgs() << "\t\toffload_sizes: " << Printer.str() << "\n");
812   }
813 
814   /// Returns the instruction where the "wait" counterpart \p RuntimeCall can be
815   /// moved. Returns nullptr if the movement is not possible, or not worth it.
816   Instruction *canBeMovedDownwards(CallInst &RuntimeCall) {
817     // FIXME: This traverses only the BasicBlock where RuntimeCall is.
818     //  Make it traverse the CFG.
819 
820     Instruction *CurrentI = &RuntimeCall;
821     bool IsWorthIt = false;
822     while ((CurrentI = CurrentI->getNextNode())) {
823 
824       // TODO: Once we detect the regions to be offloaded we should use the
825       //  alias analysis manager to check if CurrentI may modify one of
826       //  the offloaded regions.
827       if (CurrentI->mayHaveSideEffects() || CurrentI->mayReadFromMemory()) {
828         if (IsWorthIt)
829           return CurrentI;
830 
831         return nullptr;
832       }
833 
834       // FIXME: For now if we move it over anything without side effect
835       //  is worth it.
836       IsWorthIt = true;
837     }
838 
839     // Return end of BasicBlock.
840     return RuntimeCall.getParent()->getTerminator();
841   }
842 
843   /// Splits \p RuntimeCall into its "issue" and "wait" counterparts.
844   bool splitTargetDataBeginRTC(CallInst &RuntimeCall,
845                                Instruction &WaitMovementPoint) {
846     // Create stack allocated handle (__tgt_async_info) at the beginning of the
847     // function. Used for storing information of the async transfer, allowing to
848     // wait on it later.
849     auto &IRBuilder = OMPInfoCache.OMPBuilder;
850     auto *F = RuntimeCall.getCaller();
851     Instruction *FirstInst = &(F->getEntryBlock().front());
852     AllocaInst *Handle = new AllocaInst(
853         IRBuilder.AsyncInfo, F->getAddressSpace(), "handle", FirstInst);
854 
855     // Add "issue" runtime call declaration:
856     // declare %struct.tgt_async_info @__tgt_target_data_begin_issue(i64, i32,
857     //   i8**, i8**, i64*, i64*)
858     FunctionCallee IssueDecl = IRBuilder.getOrCreateRuntimeFunction(
859         M, OMPRTL___tgt_target_data_begin_mapper_issue);
860 
861     // Change RuntimeCall call site for its asynchronous version.
862     SmallVector<Value *, 8> Args;
863     for (auto &Arg : RuntimeCall.args())
864       Args.push_back(Arg.get());
865     Args.push_back(Handle);
866 
867     CallInst *IssueCallsite =
868         CallInst::Create(IssueDecl, Args, /*NameStr=*/"", &RuntimeCall);
869     RuntimeCall.eraseFromParent();
870 
871     // Add "wait" runtime call declaration:
872     // declare void @__tgt_target_data_begin_wait(i64, %struct.__tgt_async_info)
873     FunctionCallee WaitDecl = IRBuilder.getOrCreateRuntimeFunction(
874         M, OMPRTL___tgt_target_data_begin_mapper_wait);
875 
876     // Add call site to WaitDecl.
877     const unsigned DeviceIDArgNum = 0;
878     Value *WaitParams[2] = {
879         IssueCallsite->getArgOperand(DeviceIDArgNum), // device_id.
880         Handle                                        // handle to wait on.
881     };
882     CallInst::Create(WaitDecl, WaitParams, /*NameStr=*/"", &WaitMovementPoint);
883 
884     return true;
885   }
886 
887   static Value *combinedIdentStruct(Value *CurrentIdent, Value *NextIdent,
888                                     bool GlobalOnly, bool &SingleChoice) {
889     if (CurrentIdent == NextIdent)
890       return CurrentIdent;
891 
892     // TODO: Figure out how to actually combine multiple debug locations. For
893     //       now we just keep an existing one if there is a single choice.
894     if (!GlobalOnly || isa<GlobalValue>(NextIdent)) {
895       SingleChoice = !CurrentIdent;
896       return NextIdent;
897     }
898     return nullptr;
899   }
900 
901   /// Return an `struct ident_t*` value that represents the ones used in the
902   /// calls of \p RFI inside of \p F. If \p GlobalOnly is true, we will not
903   /// return a local `struct ident_t*`. For now, if we cannot find a suitable
904   /// return value we create one from scratch. We also do not yet combine
905   /// information, e.g., the source locations, see combinedIdentStruct.
906   Value *
907   getCombinedIdentFromCallUsesIn(OMPInformationCache::RuntimeFunctionInfo &RFI,
908                                  Function &F, bool GlobalOnly) {
909     bool SingleChoice = true;
910     Value *Ident = nullptr;
911     auto CombineIdentStruct = [&](Use &U, Function &Caller) {
912       CallInst *CI = getCallIfRegularCall(U, &RFI);
913       if (!CI || &F != &Caller)
914         return false;
915       Ident = combinedIdentStruct(Ident, CI->getArgOperand(0),
916                                   /* GlobalOnly */ true, SingleChoice);
917       return false;
918     };
919     RFI.foreachUse(SCC, CombineIdentStruct);
920 
921     if (!Ident || !SingleChoice) {
922       // The IRBuilder uses the insertion block to get to the module, this is
923       // unfortunate but we work around it for now.
924       if (!OMPInfoCache.OMPBuilder.getInsertionPoint().getBlock())
925         OMPInfoCache.OMPBuilder.updateToLocation(OpenMPIRBuilder::InsertPointTy(
926             &F.getEntryBlock(), F.getEntryBlock().begin()));
927       // Create a fallback location if non was found.
928       // TODO: Use the debug locations of the calls instead.
929       Constant *Loc = OMPInfoCache.OMPBuilder.getOrCreateDefaultSrcLocStr();
930       Ident = OMPInfoCache.OMPBuilder.getOrCreateIdent(Loc);
931     }
932     return Ident;
933   }
934 
935   /// Try to eliminate calls of \p RFI in \p F by reusing an existing one or
936   /// \p ReplVal if given.
937   bool deduplicateRuntimeCalls(Function &F,
938                                OMPInformationCache::RuntimeFunctionInfo &RFI,
939                                Value *ReplVal = nullptr) {
940     auto *UV = RFI.getUseVector(F);
941     if (!UV || UV->size() + (ReplVal != nullptr) < 2)
942       return false;
943 
944     LLVM_DEBUG(
945         dbgs() << TAG << "Deduplicate " << UV->size() << " uses of " << RFI.Name
946                << (ReplVal ? " with an existing value\n" : "\n") << "\n");
947 
948     assert((!ReplVal || (isa<Argument>(ReplVal) &&
949                          cast<Argument>(ReplVal)->getParent() == &F)) &&
950            "Unexpected replacement value!");
951 
952     // TODO: Use dominance to find a good position instead.
953     auto CanBeMoved = [this](CallBase &CB) {
954       unsigned NumArgs = CB.getNumArgOperands();
955       if (NumArgs == 0)
956         return true;
957       if (CB.getArgOperand(0)->getType() != OMPInfoCache.OMPBuilder.IdentPtr)
958         return false;
959       for (unsigned u = 1; u < NumArgs; ++u)
960         if (isa<Instruction>(CB.getArgOperand(u)))
961           return false;
962       return true;
963     };
964 
965     if (!ReplVal) {
966       for (Use *U : *UV)
967         if (CallInst *CI = getCallIfRegularCall(*U, &RFI)) {
968           if (!CanBeMoved(*CI))
969             continue;
970 
971           auto Remark = [&](OptimizationRemark OR) {
972             auto newLoc = &*F.getEntryBlock().getFirstInsertionPt();
973             return OR << "OpenMP runtime call "
974                       << ore::NV("OpenMPOptRuntime", RFI.Name) << " moved to "
975                       << ore::NV("OpenMPRuntimeMoves", newLoc->getDebugLoc());
976           };
977           emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeCodeMotion", Remark);
978 
979           CI->moveBefore(&*F.getEntryBlock().getFirstInsertionPt());
980           ReplVal = CI;
981           break;
982         }
983       if (!ReplVal)
984         return false;
985     }
986 
987     // If we use a call as a replacement value we need to make sure the ident is
988     // valid at the new location. For now we just pick a global one, either
989     // existing and used by one of the calls, or created from scratch.
990     if (CallBase *CI = dyn_cast<CallBase>(ReplVal)) {
991       if (CI->getNumArgOperands() > 0 &&
992           CI->getArgOperand(0)->getType() == OMPInfoCache.OMPBuilder.IdentPtr) {
993         Value *Ident = getCombinedIdentFromCallUsesIn(RFI, F,
994                                                       /* GlobalOnly */ true);
995         CI->setArgOperand(0, Ident);
996       }
997     }
998 
999     bool Changed = false;
1000     auto ReplaceAndDeleteCB = [&](Use &U, Function &Caller) {
1001       CallInst *CI = getCallIfRegularCall(U, &RFI);
1002       if (!CI || CI == ReplVal || &F != &Caller)
1003         return false;
1004       assert(CI->getCaller() == &F && "Unexpected call!");
1005 
1006       auto Remark = [&](OptimizationRemark OR) {
1007         return OR << "OpenMP runtime call "
1008                   << ore::NV("OpenMPOptRuntime", RFI.Name) << " deduplicated";
1009       };
1010       emitRemark<OptimizationRemark>(CI, "OpenMPRuntimeDeduplicated", Remark);
1011 
1012       CGUpdater.removeCallSite(*CI);
1013       CI->replaceAllUsesWith(ReplVal);
1014       CI->eraseFromParent();
1015       ++NumOpenMPRuntimeCallsDeduplicated;
1016       Changed = true;
1017       return true;
1018     };
1019     RFI.foreachUse(SCC, ReplaceAndDeleteCB);
1020 
1021     return Changed;
1022   }
1023 
1024   /// Collect arguments that represent the global thread id in \p GTIdArgs.
1025   void collectGlobalThreadIdArguments(SmallSetVector<Value *, 16> &GTIdArgs) {
1026     // TODO: Below we basically perform a fixpoint iteration with a pessimistic
1027     //       initialization. We could define an AbstractAttribute instead and
1028     //       run the Attributor here once it can be run as an SCC pass.
1029 
1030     // Helper to check the argument \p ArgNo at all call sites of \p F for
1031     // a GTId.
1032     auto CallArgOpIsGTId = [&](Function &F, unsigned ArgNo, CallInst &RefCI) {
1033       if (!F.hasLocalLinkage())
1034         return false;
1035       for (Use &U : F.uses()) {
1036         if (CallInst *CI = getCallIfRegularCall(U)) {
1037           Value *ArgOp = CI->getArgOperand(ArgNo);
1038           if (CI == &RefCI || GTIdArgs.count(ArgOp) ||
1039               getCallIfRegularCall(
1040                   *ArgOp, &OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num]))
1041             continue;
1042         }
1043         return false;
1044       }
1045       return true;
1046     };
1047 
1048     // Helper to identify uses of a GTId as GTId arguments.
1049     auto AddUserArgs = [&](Value &GTId) {
1050       for (Use &U : GTId.uses())
1051         if (CallInst *CI = dyn_cast<CallInst>(U.getUser()))
1052           if (CI->isArgOperand(&U))
1053             if (Function *Callee = CI->getCalledFunction())
1054               if (CallArgOpIsGTId(*Callee, U.getOperandNo(), *CI))
1055                 GTIdArgs.insert(Callee->getArg(U.getOperandNo()));
1056     };
1057 
1058     // The argument users of __kmpc_global_thread_num calls are GTIds.
1059     OMPInformationCache::RuntimeFunctionInfo &GlobThreadNumRFI =
1060         OMPInfoCache.RFIs[OMPRTL___kmpc_global_thread_num];
1061 
1062     GlobThreadNumRFI.foreachUse(SCC, [&](Use &U, Function &F) {
1063       if (CallInst *CI = getCallIfRegularCall(U, &GlobThreadNumRFI))
1064         AddUserArgs(*CI);
1065       return false;
1066     });
1067 
1068     // Transitively search for more arguments by looking at the users of the
1069     // ones we know already. During the search the GTIdArgs vector is extended
1070     // so we cannot cache the size nor can we use a range based for.
1071     for (unsigned u = 0; u < GTIdArgs.size(); ++u)
1072       AddUserArgs(*GTIdArgs[u]);
1073   }
1074 
1075   /// Kernel (=GPU) optimizations and utility functions
1076   ///
1077   ///{{
1078 
1079   /// Check if \p F is a kernel, hence entry point for target offloading.
1080   bool isKernel(Function &F) { return OMPInfoCache.Kernels.count(&F); }
1081 
1082   /// Cache to remember the unique kernel for a function.
1083   DenseMap<Function *, Optional<Kernel>> UniqueKernelMap;
1084 
1085   /// Find the unique kernel that will execute \p F, if any.
1086   Kernel getUniqueKernelFor(Function &F);
1087 
1088   /// Find the unique kernel that will execute \p I, if any.
1089   Kernel getUniqueKernelFor(Instruction &I) {
1090     return getUniqueKernelFor(*I.getFunction());
1091   }
1092 
1093   /// Rewrite the device (=GPU) code state machine create in non-SPMD mode in
1094   /// the cases we can avoid taking the address of a function.
1095   bool rewriteDeviceCodeStateMachine();
1096 
1097   ///
1098   ///}}
1099 
1100   /// Emit a remark generically
1101   ///
1102   /// This template function can be used to generically emit a remark. The
1103   /// RemarkKind should be one of the following:
1104   ///   - OptimizationRemark to indicate a successful optimization attempt
1105   ///   - OptimizationRemarkMissed to report a failed optimization attempt
1106   ///   - OptimizationRemarkAnalysis to provide additional information about an
1107   ///     optimization attempt
1108   ///
1109   /// The remark is built using a callback function provided by the caller that
1110   /// takes a RemarkKind as input and returns a RemarkKind.
1111   template <typename RemarkKind,
1112             typename RemarkCallBack = function_ref<RemarkKind(RemarkKind &&)>>
1113   void emitRemark(Instruction *Inst, StringRef RemarkName,
1114                   RemarkCallBack &&RemarkCB) const {
1115     Function *F = Inst->getParent()->getParent();
1116     auto &ORE = OREGetter(F);
1117 
1118     ORE.emit(
1119         [&]() { return RemarkCB(RemarkKind(DEBUG_TYPE, RemarkName, Inst)); });
1120   }
1121 
1122   /// Emit a remark on a function. Since only OptimizationRemark is supporting
1123   /// this, it can't be made generic.
1124   void
1125   emitRemarkOnFunction(Function *F, StringRef RemarkName,
1126                        function_ref<OptimizationRemark(OptimizationRemark &&)>
1127                            &&RemarkCB) const {
1128     auto &ORE = OREGetter(F);
1129 
1130     ORE.emit([&]() {
1131       return RemarkCB(OptimizationRemark(DEBUG_TYPE, RemarkName, F));
1132     });
1133   }
1134 
1135   /// The underlying module.
1136   Module &M;
1137 
1138   /// The SCC we are operating on.
1139   SmallVectorImpl<Function *> &SCC;
1140 
1141   /// Callback to update the call graph, the first argument is a removed call,
1142   /// the second an optional replacement call.
1143   CallGraphUpdater &CGUpdater;
1144 
1145   /// Callback to get an OptimizationRemarkEmitter from a Function *
1146   OptimizationRemarkGetter OREGetter;
1147 
1148   /// OpenMP-specific information cache. Also Used for Attributor runs.
1149   OMPInformationCache &OMPInfoCache;
1150 
1151   /// Attributor instance.
1152   Attributor &A;
1153 
1154   /// Helper function to run Attributor on SCC.
1155   bool runAttributor() {
1156     if (SCC.empty())
1157       return false;
1158 
1159     registerAAs();
1160 
1161     ChangeStatus Changed = A.run();
1162 
1163     LLVM_DEBUG(dbgs() << "[Attributor] Done with " << SCC.size()
1164                       << " functions, result: " << Changed << ".\n");
1165 
1166     return Changed == ChangeStatus::CHANGED;
1167   }
1168 
1169   /// Populate the Attributor with abstract attribute opportunities in the
1170   /// function.
1171   void registerAAs() {
1172     if (SCC.empty())
1173       return;
1174 
1175     // Create CallSite AA for all Getters.
1176     for (int Idx = 0; Idx < OMPInfoCache.ICVs.size() - 1; ++Idx) {
1177       auto ICVInfo = OMPInfoCache.ICVs[static_cast<InternalControlVar>(Idx)];
1178 
1179       auto &GetterRFI = OMPInfoCache.RFIs[ICVInfo.Getter];
1180 
1181       auto CreateAA = [&](Use &U, Function &Caller) {
1182         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U, &GetterRFI);
1183         if (!CI)
1184           return false;
1185 
1186         auto &CB = cast<CallBase>(*CI);
1187 
1188         IRPosition CBPos = IRPosition::callsite_function(CB);
1189         A.getOrCreateAAFor<AAICVTracker>(CBPos);
1190         return false;
1191       };
1192 
1193       GetterRFI.foreachUse(SCC, CreateAA);
1194     }
1195   }
1196 };
1197 
1198 Kernel OpenMPOpt::getUniqueKernelFor(Function &F) {
1199   if (!OMPInfoCache.ModuleSlice.count(&F))
1200     return nullptr;
1201 
1202   // Use a scope to keep the lifetime of the CachedKernel short.
1203   {
1204     Optional<Kernel> &CachedKernel = UniqueKernelMap[&F];
1205     if (CachedKernel)
1206       return *CachedKernel;
1207 
1208     // TODO: We should use an AA to create an (optimistic and callback
1209     //       call-aware) call graph. For now we stick to simple patterns that
1210     //       are less powerful, basically the worst fixpoint.
1211     if (isKernel(F)) {
1212       CachedKernel = Kernel(&F);
1213       return *CachedKernel;
1214     }
1215 
1216     CachedKernel = nullptr;
1217     if (!F.hasLocalLinkage())
1218       return nullptr;
1219   }
1220 
1221   auto GetUniqueKernelForUse = [&](const Use &U) -> Kernel {
1222     if (auto *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
1223       // Allow use in equality comparisons.
1224       if (Cmp->isEquality())
1225         return getUniqueKernelFor(*Cmp);
1226       return nullptr;
1227     }
1228     if (auto *CB = dyn_cast<CallBase>(U.getUser())) {
1229       // Allow direct calls.
1230       if (CB->isCallee(&U))
1231         return getUniqueKernelFor(*CB);
1232       // Allow the use in __kmpc_kernel_prepare_parallel calls.
1233       if (Function *Callee = CB->getCalledFunction())
1234         if (Callee->getName() == "__kmpc_kernel_prepare_parallel")
1235           return getUniqueKernelFor(*CB);
1236       return nullptr;
1237     }
1238     // Disallow every other use.
1239     return nullptr;
1240   };
1241 
1242   // TODO: In the future we want to track more than just a unique kernel.
1243   SmallPtrSet<Kernel, 2> PotentialKernels;
1244   OMPInformationCache::foreachUse(F, [&](const Use &U) {
1245     PotentialKernels.insert(GetUniqueKernelForUse(U));
1246   });
1247 
1248   Kernel K = nullptr;
1249   if (PotentialKernels.size() == 1)
1250     K = *PotentialKernels.begin();
1251 
1252   // Cache the result.
1253   UniqueKernelMap[&F] = K;
1254 
1255   return K;
1256 }
1257 
1258 bool OpenMPOpt::rewriteDeviceCodeStateMachine() {
1259   OMPInformationCache::RuntimeFunctionInfo &KernelPrepareParallelRFI =
1260       OMPInfoCache.RFIs[OMPRTL___kmpc_kernel_prepare_parallel];
1261 
1262   bool Changed = false;
1263   if (!KernelPrepareParallelRFI)
1264     return Changed;
1265 
1266   for (Function *F : SCC) {
1267 
1268     // Check if the function is uses in a __kmpc_kernel_prepare_parallel call at
1269     // all.
1270     bool UnknownUse = false;
1271     bool KernelPrepareUse = false;
1272     unsigned NumDirectCalls = 0;
1273 
1274     SmallVector<Use *, 2> ToBeReplacedStateMachineUses;
1275     OMPInformationCache::foreachUse(*F, [&](Use &U) {
1276       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
1277         if (CB->isCallee(&U)) {
1278           ++NumDirectCalls;
1279           return;
1280         }
1281 
1282       if (isa<ICmpInst>(U.getUser())) {
1283         ToBeReplacedStateMachineUses.push_back(&U);
1284         return;
1285       }
1286       if (!KernelPrepareUse && OpenMPOpt::getCallIfRegularCall(
1287                                    *U.getUser(), &KernelPrepareParallelRFI)) {
1288         KernelPrepareUse = true;
1289         ToBeReplacedStateMachineUses.push_back(&U);
1290         return;
1291       }
1292       UnknownUse = true;
1293     });
1294 
1295     // Do not emit a remark if we haven't seen a __kmpc_kernel_prepare_parallel
1296     // use.
1297     if (!KernelPrepareUse)
1298       continue;
1299 
1300     {
1301       auto Remark = [&](OptimizationRemark OR) {
1302         return OR << "Found a parallel region that is called in a target "
1303                      "region but not part of a combined target construct nor "
1304                      "nesed inside a target construct without intermediate "
1305                      "code. This can lead to excessive register usage for "
1306                      "unrelated target regions in the same translation unit "
1307                      "due to spurious call edges assumed by ptxas.";
1308       };
1309       emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark);
1310     }
1311 
1312     // If this ever hits, we should investigate.
1313     // TODO: Checking the number of uses is not a necessary restriction and
1314     // should be lifted.
1315     if (UnknownUse || NumDirectCalls != 1 ||
1316         ToBeReplacedStateMachineUses.size() != 2) {
1317       {
1318         auto Remark = [&](OptimizationRemark OR) {
1319           return OR << "Parallel region is used in "
1320                     << (UnknownUse ? "unknown" : "unexpected")
1321                     << " ways; will not attempt to rewrite the state machine.";
1322         };
1323         emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD", Remark);
1324       }
1325       continue;
1326     }
1327 
1328     // Even if we have __kmpc_kernel_prepare_parallel calls, we (for now) give
1329     // up if the function is not called from a unique kernel.
1330     Kernel K = getUniqueKernelFor(*F);
1331     if (!K) {
1332       {
1333         auto Remark = [&](OptimizationRemark OR) {
1334           return OR << "Parallel region is not known to be called from a "
1335                        "unique single target region, maybe the surrounding "
1336                        "function has external linkage?; will not attempt to "
1337                        "rewrite the state machine use.";
1338         };
1339         emitRemarkOnFunction(F, "OpenMPParallelRegionInMultipleKernesl",
1340                              Remark);
1341       }
1342       continue;
1343     }
1344 
1345     // We now know F is a parallel body function called only from the kernel K.
1346     // We also identified the state machine uses in which we replace the
1347     // function pointer by a new global symbol for identification purposes. This
1348     // ensures only direct calls to the function are left.
1349 
1350     {
1351       auto RemarkParalleRegion = [&](OptimizationRemark OR) {
1352         return OR << "Specialize parallel region that is only reached from a "
1353                      "single target region to avoid spurious call edges and "
1354                      "excessive register usage in other target regions. "
1355                      "(parallel region ID: "
1356                   << ore::NV("OpenMPParallelRegion", F->getName())
1357                   << ", kernel ID: "
1358                   << ore::NV("OpenMPTargetRegion", K->getName()) << ")";
1359       };
1360       emitRemarkOnFunction(F, "OpenMPParallelRegionInNonSPMD",
1361                            RemarkParalleRegion);
1362       auto RemarkKernel = [&](OptimizationRemark OR) {
1363         return OR << "Target region containing the parallel region that is "
1364                      "specialized. (parallel region ID: "
1365                   << ore::NV("OpenMPParallelRegion", F->getName())
1366                   << ", kernel ID: "
1367                   << ore::NV("OpenMPTargetRegion", K->getName()) << ")";
1368       };
1369       emitRemarkOnFunction(K, "OpenMPParallelRegionInNonSPMD", RemarkKernel);
1370     }
1371 
1372     Module &M = *F->getParent();
1373     Type *Int8Ty = Type::getInt8Ty(M.getContext());
1374 
1375     auto *ID = new GlobalVariable(
1376         M, Int8Ty, /* isConstant */ true, GlobalValue::PrivateLinkage,
1377         UndefValue::get(Int8Ty), F->getName() + ".ID");
1378 
1379     for (Use *U : ToBeReplacedStateMachineUses)
1380       U->set(ConstantExpr::getBitCast(ID, U->get()->getType()));
1381 
1382     ++NumOpenMPParallelRegionsReplacedInGPUStateMachine;
1383 
1384     Changed = true;
1385   }
1386 
1387   return Changed;
1388 }
1389 
1390 /// Abstract Attribute for tracking ICV values.
1391 struct AAICVTracker : public StateWrapper<BooleanState, AbstractAttribute> {
1392   using Base = StateWrapper<BooleanState, AbstractAttribute>;
1393   AAICVTracker(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
1394 
1395   void initialize(Attributor &A) override {
1396     Function *F = getAnchorScope();
1397     if (!F || !A.isFunctionIPOAmendable(*F))
1398       indicatePessimisticFixpoint();
1399   }
1400 
1401   /// Returns true if value is assumed to be tracked.
1402   bool isAssumedTracked() const { return getAssumed(); }
1403 
1404   /// Returns true if value is known to be tracked.
1405   bool isKnownTracked() const { return getAssumed(); }
1406 
1407   /// Create an abstract attribute biew for the position \p IRP.
1408   static AAICVTracker &createForPosition(const IRPosition &IRP, Attributor &A);
1409 
1410   /// Return the value with which \p I can be replaced for specific \p ICV.
1411   virtual Optional<Value *> getReplacementValue(InternalControlVar ICV,
1412                                                 const Instruction *I,
1413                                                 Attributor &A) const {
1414     return None;
1415   }
1416 
1417   /// Return an assumed unique ICV value if a single candidate is found. If
1418   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1419   /// Optional::NoneType.
1420   virtual Optional<Value *>
1421   getUniqueReplacementValue(InternalControlVar ICV) const = 0;
1422 
1423   // Currently only nthreads is being tracked.
1424   // this array will only grow with time.
1425   InternalControlVar TrackableICVs[1] = {ICV_nthreads};
1426 
1427   /// See AbstractAttribute::getName()
1428   const std::string getName() const override { return "AAICVTracker"; }
1429 
1430   /// See AbstractAttribute::getIdAddr()
1431   const char *getIdAddr() const override { return &ID; }
1432 
1433   /// This function should return true if the type of the \p AA is AAICVTracker
1434   static bool classof(const AbstractAttribute *AA) {
1435     return (AA->getIdAddr() == &ID);
1436   }
1437 
1438   static const char ID;
1439 };
1440 
1441 struct AAICVTrackerFunction : public AAICVTracker {
1442   AAICVTrackerFunction(const IRPosition &IRP, Attributor &A)
1443       : AAICVTracker(IRP, A) {}
1444 
1445   // FIXME: come up with better string.
1446   const std::string getAsStr() const override { return "ICVTrackerFunction"; }
1447 
1448   // FIXME: come up with some stats.
1449   void trackStatistics() const override {}
1450 
1451   /// We don't manifest anything for this AA.
1452   ChangeStatus manifest(Attributor &A) override {
1453     return ChangeStatus::UNCHANGED;
1454   }
1455 
1456   // Map of ICV to their values at specific program point.
1457   EnumeratedArray<DenseMap<Instruction *, Value *>, InternalControlVar,
1458                   InternalControlVar::ICV___last>
1459       ICVReplacementValuesMap;
1460 
1461   ChangeStatus updateImpl(Attributor &A) override {
1462     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1463 
1464     Function *F = getAnchorScope();
1465 
1466     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
1467 
1468     for (InternalControlVar ICV : TrackableICVs) {
1469       auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
1470 
1471       auto &ValuesMap = ICVReplacementValuesMap[ICV];
1472       auto TrackValues = [&](Use &U, Function &) {
1473         CallInst *CI = OpenMPOpt::getCallIfRegularCall(U);
1474         if (!CI)
1475           return false;
1476 
1477         // FIXME: handle setters with more that 1 arguments.
1478         /// Track new value.
1479         if (ValuesMap.insert(std::make_pair(CI, CI->getArgOperand(0))).second)
1480           HasChanged = ChangeStatus::CHANGED;
1481 
1482         return false;
1483       };
1484 
1485       auto CallCheck = [&](Instruction &I) {
1486         Optional<Value *> ReplVal = getValueForCall(A, &I, ICV);
1487         if (ReplVal.hasValue() &&
1488             ValuesMap.insert(std::make_pair(&I, *ReplVal)).second)
1489           HasChanged = ChangeStatus::CHANGED;
1490 
1491         return true;
1492       };
1493 
1494       // Track all changes of an ICV.
1495       SetterRFI.foreachUse(TrackValues, F);
1496 
1497       A.checkForAllInstructions(CallCheck, *this, {Instruction::Call},
1498                                 /* CheckBBLivenessOnly */ true);
1499 
1500       /// TODO: Figure out a way to avoid adding entry in
1501       /// ICVReplacementValuesMap
1502       Instruction *Entry = &F->getEntryBlock().front();
1503       if (HasChanged == ChangeStatus::CHANGED && !ValuesMap.count(Entry))
1504         ValuesMap.insert(std::make_pair(Entry, nullptr));
1505     }
1506 
1507     return HasChanged;
1508   }
1509 
1510   /// Hepler to check if \p I is a call and get the value for it if it is
1511   /// unique.
1512   Optional<Value *> getValueForCall(Attributor &A, const Instruction *I,
1513                                     InternalControlVar &ICV) const {
1514 
1515     const auto *CB = dyn_cast<CallBase>(I);
1516     if (!CB)
1517       return None;
1518 
1519     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
1520     auto &GetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Getter];
1521     auto &SetterRFI = OMPInfoCache.RFIs[OMPInfoCache.ICVs[ICV].Setter];
1522     Function *CalledFunction = CB->getCalledFunction();
1523 
1524     // Indirect call, assume ICV changes.
1525     if (CalledFunction == nullptr)
1526       return nullptr;
1527     if (CalledFunction == GetterRFI.Declaration)
1528       return None;
1529     if (CalledFunction == SetterRFI.Declaration) {
1530       if (ICVReplacementValuesMap[ICV].count(I))
1531         return ICVReplacementValuesMap[ICV].lookup(I);
1532 
1533       return nullptr;
1534     }
1535 
1536     // Since we don't know, assume it changes the ICV.
1537     if (CalledFunction->isDeclaration())
1538       return nullptr;
1539 
1540     const auto &ICVTrackingAA =
1541         A.getAAFor<AAICVTracker>(*this, IRPosition::callsite_returned(*CB));
1542 
1543     if (ICVTrackingAA.isAssumedTracked())
1544       return ICVTrackingAA.getUniqueReplacementValue(ICV);
1545 
1546     // If we don't know, assume it changes.
1547     return nullptr;
1548   }
1549 
1550   // We don't check unique value for a function, so return None.
1551   Optional<Value *>
1552   getUniqueReplacementValue(InternalControlVar ICV) const override {
1553     return None;
1554   }
1555 
1556   /// Return the value with which \p I can be replaced for specific \p ICV.
1557   Optional<Value *> getReplacementValue(InternalControlVar ICV,
1558                                         const Instruction *I,
1559                                         Attributor &A) const override {
1560     const auto &ValuesMap = ICVReplacementValuesMap[ICV];
1561     if (ValuesMap.count(I))
1562       return ValuesMap.lookup(I);
1563 
1564     SmallVector<const Instruction *, 16> Worklist;
1565     SmallPtrSet<const Instruction *, 16> Visited;
1566     Worklist.push_back(I);
1567 
1568     Optional<Value *> ReplVal;
1569 
1570     while (!Worklist.empty()) {
1571       const Instruction *CurrInst = Worklist.pop_back_val();
1572       if (!Visited.insert(CurrInst).second)
1573         continue;
1574 
1575       const BasicBlock *CurrBB = CurrInst->getParent();
1576 
1577       // Go up and look for all potential setters/calls that might change the
1578       // ICV.
1579       while ((CurrInst = CurrInst->getPrevNode())) {
1580         if (ValuesMap.count(CurrInst)) {
1581           Optional<Value *> NewReplVal = ValuesMap.lookup(CurrInst);
1582           // Unknown value, track new.
1583           if (!ReplVal.hasValue()) {
1584             ReplVal = NewReplVal;
1585             break;
1586           }
1587 
1588           // If we found a new value, we can't know the icv value anymore.
1589           if (NewReplVal.hasValue())
1590             if (ReplVal != NewReplVal)
1591               return nullptr;
1592 
1593           break;
1594         }
1595 
1596         Optional<Value *> NewReplVal = getValueForCall(A, CurrInst, ICV);
1597         if (!NewReplVal.hasValue())
1598           continue;
1599 
1600         // Unknown value, track new.
1601         if (!ReplVal.hasValue()) {
1602           ReplVal = NewReplVal;
1603           break;
1604         }
1605 
1606         // if (NewReplVal.hasValue())
1607         // We found a new value, we can't know the icv value anymore.
1608         if (ReplVal != NewReplVal)
1609           return nullptr;
1610       }
1611 
1612       // If we are in the same BB and we have a value, we are done.
1613       if (CurrBB == I->getParent() && ReplVal.hasValue())
1614         return ReplVal;
1615 
1616       // Go through all predecessors and add terminators for analysis.
1617       for (const BasicBlock *Pred : predecessors(CurrBB))
1618         if (const Instruction *Terminator = Pred->getTerminator())
1619           Worklist.push_back(Terminator);
1620     }
1621 
1622     return ReplVal;
1623   }
1624 };
1625 
1626 struct AAICVTrackerFunctionReturned : AAICVTracker {
1627   AAICVTrackerFunctionReturned(const IRPosition &IRP, Attributor &A)
1628       : AAICVTracker(IRP, A) {}
1629 
1630   // FIXME: come up with better string.
1631   const std::string getAsStr() const override {
1632     return "ICVTrackerFunctionReturned";
1633   }
1634 
1635   // FIXME: come up with some stats.
1636   void trackStatistics() const override {}
1637 
1638   /// We don't manifest anything for this AA.
1639   ChangeStatus manifest(Attributor &A) override {
1640     return ChangeStatus::UNCHANGED;
1641   }
1642 
1643   // Map of ICV to their values at specific program point.
1644   EnumeratedArray<Optional<Value *>, InternalControlVar,
1645                   InternalControlVar::ICV___last>
1646       ICVReplacementValuesMap;
1647 
1648   /// Return the value with which \p I can be replaced for specific \p ICV.
1649   Optional<Value *>
1650   getUniqueReplacementValue(InternalControlVar ICV) const override {
1651     return ICVReplacementValuesMap[ICV];
1652   }
1653 
1654   ChangeStatus updateImpl(Attributor &A) override {
1655     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1656     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
1657         *this, IRPosition::function(*getAnchorScope()));
1658 
1659     if (!ICVTrackingAA.isAssumedTracked())
1660       return indicatePessimisticFixpoint();
1661 
1662     for (InternalControlVar ICV : TrackableICVs) {
1663       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
1664       Optional<Value *> UniqueICVValue;
1665 
1666       auto CheckReturnInst = [&](Instruction &I) {
1667         Optional<Value *> NewReplVal =
1668             ICVTrackingAA.getReplacementValue(ICV, &I, A);
1669 
1670         // If we found a second ICV value there is no unique returned value.
1671         if (UniqueICVValue.hasValue() && UniqueICVValue != NewReplVal)
1672           return false;
1673 
1674         UniqueICVValue = NewReplVal;
1675 
1676         return true;
1677       };
1678 
1679       if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret},
1680                                      /* CheckBBLivenessOnly */ true))
1681         UniqueICVValue = nullptr;
1682 
1683       if (UniqueICVValue == ReplVal)
1684         continue;
1685 
1686       ReplVal = UniqueICVValue;
1687       Changed = ChangeStatus::CHANGED;
1688     }
1689 
1690     return Changed;
1691   }
1692 };
1693 
1694 struct AAICVTrackerCallSite : AAICVTracker {
1695   AAICVTrackerCallSite(const IRPosition &IRP, Attributor &A)
1696       : AAICVTracker(IRP, A) {}
1697 
1698   void initialize(Attributor &A) override {
1699     Function *F = getAnchorScope();
1700     if (!F || !A.isFunctionIPOAmendable(*F))
1701       indicatePessimisticFixpoint();
1702 
1703     // We only initialize this AA for getters, so we need to know which ICV it
1704     // gets.
1705     auto &OMPInfoCache = static_cast<OMPInformationCache &>(A.getInfoCache());
1706     for (InternalControlVar ICV : TrackableICVs) {
1707       auto ICVInfo = OMPInfoCache.ICVs[ICV];
1708       auto &Getter = OMPInfoCache.RFIs[ICVInfo.Getter];
1709       if (Getter.Declaration == getAssociatedFunction()) {
1710         AssociatedICV = ICVInfo.Kind;
1711         return;
1712       }
1713     }
1714 
1715     /// Unknown ICV.
1716     indicatePessimisticFixpoint();
1717   }
1718 
1719   ChangeStatus manifest(Attributor &A) override {
1720     if (!ReplVal.hasValue() || !ReplVal.getValue())
1721       return ChangeStatus::UNCHANGED;
1722 
1723     A.changeValueAfterManifest(*getCtxI(), **ReplVal);
1724     A.deleteAfterManifest(*getCtxI());
1725 
1726     return ChangeStatus::CHANGED;
1727   }
1728 
1729   // FIXME: come up with better string.
1730   const std::string getAsStr() const override { return "ICVTrackerCallSite"; }
1731 
1732   // FIXME: come up with some stats.
1733   void trackStatistics() const override {}
1734 
1735   InternalControlVar AssociatedICV;
1736   Optional<Value *> ReplVal;
1737 
1738   ChangeStatus updateImpl(Attributor &A) override {
1739     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
1740         *this, IRPosition::function(*getAnchorScope()));
1741 
1742     // We don't have any information, so we assume it changes the ICV.
1743     if (!ICVTrackingAA.isAssumedTracked())
1744       return indicatePessimisticFixpoint();
1745 
1746     Optional<Value *> NewReplVal =
1747         ICVTrackingAA.getReplacementValue(AssociatedICV, getCtxI(), A);
1748 
1749     if (ReplVal == NewReplVal)
1750       return ChangeStatus::UNCHANGED;
1751 
1752     ReplVal = NewReplVal;
1753     return ChangeStatus::CHANGED;
1754   }
1755 
1756   // Return the value with which associated value can be replaced for specific
1757   // \p ICV.
1758   Optional<Value *>
1759   getUniqueReplacementValue(InternalControlVar ICV) const override {
1760     return ReplVal;
1761   }
1762 };
1763 
1764 struct AAICVTrackerCallSiteReturned : AAICVTracker {
1765   AAICVTrackerCallSiteReturned(const IRPosition &IRP, Attributor &A)
1766       : AAICVTracker(IRP, A) {}
1767 
1768   // FIXME: come up with better string.
1769   const std::string getAsStr() const override {
1770     return "ICVTrackerCallSiteReturned";
1771   }
1772 
1773   // FIXME: come up with some stats.
1774   void trackStatistics() const override {}
1775 
1776   /// We don't manifest anything for this AA.
1777   ChangeStatus manifest(Attributor &A) override {
1778     return ChangeStatus::UNCHANGED;
1779   }
1780 
1781   // Map of ICV to their values at specific program point.
1782   EnumeratedArray<Optional<Value *>, InternalControlVar,
1783                   InternalControlVar::ICV___last>
1784       ICVReplacementValuesMap;
1785 
1786   /// Return the value with which associated value can be replaced for specific
1787   /// \p ICV.
1788   Optional<Value *>
1789   getUniqueReplacementValue(InternalControlVar ICV) const override {
1790     return ICVReplacementValuesMap[ICV];
1791   }
1792 
1793   ChangeStatus updateImpl(Attributor &A) override {
1794     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1795     const auto &ICVTrackingAA = A.getAAFor<AAICVTracker>(
1796         *this, IRPosition::returned(*getAssociatedFunction()));
1797 
1798     // We don't have any information, so we assume it changes the ICV.
1799     if (!ICVTrackingAA.isAssumedTracked())
1800       return indicatePessimisticFixpoint();
1801 
1802     for (InternalControlVar ICV : TrackableICVs) {
1803       Optional<Value *> &ReplVal = ICVReplacementValuesMap[ICV];
1804       Optional<Value *> NewReplVal =
1805           ICVTrackingAA.getUniqueReplacementValue(ICV);
1806 
1807       if (ReplVal == NewReplVal)
1808         continue;
1809 
1810       ReplVal = NewReplVal;
1811       Changed = ChangeStatus::CHANGED;
1812     }
1813     return Changed;
1814   }
1815 };
1816 } // namespace
1817 
1818 const char AAICVTracker::ID = 0;
1819 
1820 AAICVTracker &AAICVTracker::createForPosition(const IRPosition &IRP,
1821                                               Attributor &A) {
1822   AAICVTracker *AA = nullptr;
1823   switch (IRP.getPositionKind()) {
1824   case IRPosition::IRP_INVALID:
1825   case IRPosition::IRP_FLOAT:
1826   case IRPosition::IRP_ARGUMENT:
1827   case IRPosition::IRP_CALL_SITE_ARGUMENT:
1828     llvm_unreachable("ICVTracker can only be created for function position!");
1829   case IRPosition::IRP_RETURNED:
1830     AA = new (A.Allocator) AAICVTrackerFunctionReturned(IRP, A);
1831     break;
1832   case IRPosition::IRP_CALL_SITE_RETURNED:
1833     AA = new (A.Allocator) AAICVTrackerCallSiteReturned(IRP, A);
1834     break;
1835   case IRPosition::IRP_CALL_SITE:
1836     AA = new (A.Allocator) AAICVTrackerCallSite(IRP, A);
1837     break;
1838   case IRPosition::IRP_FUNCTION:
1839     AA = new (A.Allocator) AAICVTrackerFunction(IRP, A);
1840     break;
1841   }
1842 
1843   return *AA;
1844 }
1845 
1846 PreservedAnalyses OpenMPOptPass::run(LazyCallGraph::SCC &C,
1847                                      CGSCCAnalysisManager &AM,
1848                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
1849   if (!containsOpenMP(*C.begin()->getFunction().getParent(), OMPInModule))
1850     return PreservedAnalyses::all();
1851 
1852   if (DisableOpenMPOptimizations)
1853     return PreservedAnalyses::all();
1854 
1855   SmallVector<Function *, 16> SCC;
1856   // If there are kernels in the module, we have to run on all SCC's.
1857   bool SCCIsInteresting = !OMPInModule.getKernels().empty();
1858   for (LazyCallGraph::Node &N : C) {
1859     Function *Fn = &N.getFunction();
1860     SCC.push_back(Fn);
1861 
1862     // Do we already know that the SCC contains kernels,
1863     // or that OpenMP functions are called from this SCC?
1864     if (SCCIsInteresting)
1865       continue;
1866     // If not, let's check that.
1867     SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn);
1868   }
1869 
1870   if (!SCCIsInteresting || SCC.empty())
1871     return PreservedAnalyses::all();
1872 
1873   FunctionAnalysisManager &FAM =
1874       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
1875 
1876   AnalysisGetter AG(FAM);
1877 
1878   auto OREGetter = [&FAM](Function *F) -> OptimizationRemarkEmitter & {
1879     return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
1880   };
1881 
1882   CallGraphUpdater CGUpdater;
1883   CGUpdater.initialize(CG, C, AM, UR);
1884 
1885   SetVector<Function *> Functions(SCC.begin(), SCC.end());
1886   BumpPtrAllocator Allocator;
1887   OMPInformationCache InfoCache(*(Functions.back()->getParent()), AG, Allocator,
1888                                 /*CGSCC*/ Functions, OMPInModule.getKernels());
1889 
1890   Attributor A(Functions, InfoCache, CGUpdater);
1891 
1892   OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
1893   bool Changed = OMPOpt.run();
1894   if (Changed)
1895     return PreservedAnalyses::none();
1896 
1897   return PreservedAnalyses::all();
1898 }
1899 
1900 namespace {
1901 
1902 struct OpenMPOptLegacyPass : public CallGraphSCCPass {
1903   CallGraphUpdater CGUpdater;
1904   OpenMPInModule OMPInModule;
1905   static char ID;
1906 
1907   OpenMPOptLegacyPass() : CallGraphSCCPass(ID) {
1908     initializeOpenMPOptLegacyPassPass(*PassRegistry::getPassRegistry());
1909   }
1910 
1911   void getAnalysisUsage(AnalysisUsage &AU) const override {
1912     CallGraphSCCPass::getAnalysisUsage(AU);
1913   }
1914 
1915   bool doInitialization(CallGraph &CG) override {
1916     // Disable the pass if there is no OpenMP (runtime call) in the module.
1917     containsOpenMP(CG.getModule(), OMPInModule);
1918     return false;
1919   }
1920 
1921   bool runOnSCC(CallGraphSCC &CGSCC) override {
1922     if (!containsOpenMP(CGSCC.getCallGraph().getModule(), OMPInModule))
1923       return false;
1924     if (DisableOpenMPOptimizations || skipSCC(CGSCC))
1925       return false;
1926 
1927     SmallVector<Function *, 16> SCC;
1928     // If there are kernels in the module, we have to run on all SCC's.
1929     bool SCCIsInteresting = !OMPInModule.getKernels().empty();
1930     for (CallGraphNode *CGN : CGSCC) {
1931       Function *Fn = CGN->getFunction();
1932       if (!Fn || Fn->isDeclaration())
1933         continue;
1934       SCC.push_back(Fn);
1935 
1936       // Do we already know that the SCC contains kernels,
1937       // or that OpenMP functions are called from this SCC?
1938       if (SCCIsInteresting)
1939         continue;
1940       // If not, let's check that.
1941       SCCIsInteresting |= OMPInModule.containsOMPRuntimeCalls(Fn);
1942     }
1943 
1944     if (!SCCIsInteresting || SCC.empty())
1945       return false;
1946 
1947     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
1948     CGUpdater.initialize(CG, CGSCC);
1949 
1950     // Maintain a map of functions to avoid rebuilding the ORE
1951     DenseMap<Function *, std::unique_ptr<OptimizationRemarkEmitter>> OREMap;
1952     auto OREGetter = [&OREMap](Function *F) -> OptimizationRemarkEmitter & {
1953       std::unique_ptr<OptimizationRemarkEmitter> &ORE = OREMap[F];
1954       if (!ORE)
1955         ORE = std::make_unique<OptimizationRemarkEmitter>(F);
1956       return *ORE;
1957     };
1958 
1959     AnalysisGetter AG;
1960     SetVector<Function *> Functions(SCC.begin(), SCC.end());
1961     BumpPtrAllocator Allocator;
1962     OMPInformationCache InfoCache(
1963         *(Functions.back()->getParent()), AG, Allocator,
1964         /*CGSCC*/ Functions, OMPInModule.getKernels());
1965 
1966     Attributor A(Functions, InfoCache, CGUpdater);
1967 
1968     OpenMPOpt OMPOpt(SCC, CGUpdater, OREGetter, InfoCache, A);
1969     return OMPOpt.run();
1970   }
1971 
1972   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
1973 };
1974 
1975 } // end anonymous namespace
1976 
1977 void OpenMPInModule::identifyKernels(Module &M) {
1978 
1979   NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1980   if (!MD)
1981     return;
1982 
1983   for (auto *Op : MD->operands()) {
1984     if (Op->getNumOperands() < 2)
1985       continue;
1986     MDString *KindID = dyn_cast<MDString>(Op->getOperand(1));
1987     if (!KindID || KindID->getString() != "kernel")
1988       continue;
1989 
1990     Function *KernelFn =
1991         mdconst::dyn_extract_or_null<Function>(Op->getOperand(0));
1992     if (!KernelFn)
1993       continue;
1994 
1995     ++NumOpenMPTargetRegionKernels;
1996 
1997     Kernels.insert(KernelFn);
1998   }
1999 }
2000 
2001 bool llvm::omp::containsOpenMP(Module &M, OpenMPInModule &OMPInModule) {
2002   if (OMPInModule.isKnown())
2003     return OMPInModule;
2004 
2005   auto RecordFunctionsContainingUsesOf = [&](Function *F) {
2006     for (User *U : F->users())
2007       if (auto *I = dyn_cast<Instruction>(U))
2008         OMPInModule.FuncsWithOMPRuntimeCalls.insert(I->getFunction());
2009   };
2010 
2011   // MSVC doesn't like long if-else chains for some reason and instead just
2012   // issues an error. Work around it..
2013   do {
2014 #define OMP_RTL(_Enum, _Name, ...)                                             \
2015   if (Function *F = M.getFunction(_Name)) {                                    \
2016     RecordFunctionsContainingUsesOf(F);                                        \
2017     OMPInModule = true;                                                        \
2018   }
2019 #include "llvm/Frontend/OpenMP/OMPKinds.def"
2020   } while (false);
2021 
2022   // Identify kernels once. TODO: We should split the OMPInformationCache into a
2023   // module and an SCC part. The kernel information, among other things, could
2024   // go into the module part.
2025   if (OMPInModule.isKnown() && OMPInModule) {
2026     OMPInModule.identifyKernels(M);
2027     return true;
2028   }
2029 
2030   return OMPInModule = false;
2031 }
2032 
2033 char OpenMPOptLegacyPass::ID = 0;
2034 
2035 INITIALIZE_PASS_BEGIN(OpenMPOptLegacyPass, "openmpopt",
2036                       "OpenMP specific optimizations", false, false)
2037 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2038 INITIALIZE_PASS_END(OpenMPOptLegacyPass, "openmpopt",
2039                     "OpenMP specific optimizations", false, false)
2040 
2041 Pass *llvm::createOpenMPOptLegacyPass() { return new OpenMPOptLegacyPass(); }
2042