//===-- AMDGPULowerKernelArguments.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace
bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
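  // Only amdgpu_kernel entry points take their arguments from the kernarg
  // segment; there is nothing to do for other calling conventions or for
  // kernels without arguments.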
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();

  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&*EntryBlock.begin());

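  // The kernarg segment base is assumed to be at least 16-byte aligned;
  // BaseOffset is the byte offset at which the first explicit kernel argument
  // begins.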
  const unsigned KernArgBaseAlign = 16; // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);

  unsigned MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

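  // Materialize the kernarg segment base pointer once at function entry. It
  // is never null and is dereferenceable for the entire argument block.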
  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
  KernArgSegment->addAttribute(AttributeList::ReturnIndex,
    Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
  uint64_t ExplicitArgOffset = 0;

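  // Walk the arguments in ABI order, computing each one's offset within the
  // segment and replacing its uses with a load from that offset.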
  for (Argument &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(ArgTy);
    unsigned Size = DL.getTypeSizeInBits(ArgTy);
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);

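    // Place this argument at its ABI-aligned offset, then advance the running
    // offset past its allocated size for the next argument.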
    uint64_t EltOffset = alignTo(ExplicitArgOffset, Align) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    if (Arg.use_empty())
      continue;

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          ST.getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

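    // Arguments smaller than a dword are loaded as a full dword and the bits
    // extracted afterwards; a 3-element vector of at least dword total size
    // is loaded as the corresponding 4-element vector and shuffled back down.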
    VectorType *VT = dyn_cast<VectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

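    // Round the argument offset down to the containing dword; OffsetDiff is
    // how far into that dword the value actually begins.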
    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    unsigned AdjustedAlign = MinAlign(DoShiftOpt ? AlignDownOffset : EltOffset,
                                      KernArgBaseAlign);

    Value *ArgPtr;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.

      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
        KernArgSegment,
        AlignDownOffset,
        Arg.getName() + ".kernarg.offset.align.down");
      ArgPtr = Builder.CreateBitCast(ArgPtr,
                                     Builder.getInt32Ty()->getPointerTo(AS),
                                     ArgPtr->getName() + ".cast");
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
        KernArgSegment,
        EltOffset,
        Arg.getName() + ".kernarg.offset");
      ArgPtr = Builder.CreateBitCast(ArgPtr, ArgTy->getPointerTo(AS),
                                     ArgPtr->getName() + ".cast");
    }

    if (IsV3 && Size >= 32) {
      V4Ty = VectorType::get(VT->getVectorElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      ArgPtr = Builder.CreateBitCast(ArgPtr, V4Ty->getPointerTo(AS));
    }

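    // Kernel arguments never change during the kernel's execution, so the
    // load can be marked invariant.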
    LoadInst *Load = Builder.CreateAlignedLoad(ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

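    // Re-attach pointer argument attributes as the equivalent load metadata
    // so the information survives replacing the argument with a load.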
    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      unsigned ParamAlign = Arg.getParamAlignment();
      if (ParamAlign != 0) {
        Load->setMetadata(
          LLVMContext::MD_align,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          ParamAlign))));
      }
    }

    // TODO: Convert noalias arg to !noalias

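    // Recover the original value from the widened dword load: shift the
    // relevant bytes down, truncate to the argument's width, and bitcast back
    // to the original type.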
    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, UndefValue::get(V4Ty),
                                                {0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

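  // Now that the maximum alignment of any argument is known, record the final
  // alignment of the kernarg segment pointer.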
  KernArgSegment->addAttribute(
    AttributeList::ReturnIndex,
    Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}