//===-- AMDGPULowerKernelArguments.cpp -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

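// Bookkeeping for preloading kernel arguments into user SGPRs: tracks how
// many user SGPRs remain free and decides whether a given argument fits.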
class PreloadKernelArgInfo {
private:
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  SmallVector<llvm::Metadata *, 8> KernelArgMetadata;

  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Set the initial number of user SGPRs available to preload arguments,
  // i.e. the maximum user SGPR count minus those already in use.
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);

    NumFreeUserSGPRs = MaxUserSGPRs - UserSGPRInfo.getNumUsedUserSGPRs();
  }

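  // Try to reserve user SGPRs for preloading an argument of AllocSize bytes
  // at kernarg offset ArgOffset, accounting for padding SGPRs that cover the
  // gap since the end of the previous argument (LastExplicitArgOffset).
  // Returns true if the argument can be preloaded, or packed into the
  // previous argument's register, and updates the free SGPR count.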
  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Check if this argument may be loaded into the same register as the
    // previous argument.
    if (!isAligned(Align(4), ArgOffset) && AllocSize < 4)
      return true;

    // Pad SGPRs for kernarg alignment.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip allocas when choosing the insertion point for the kernarg loads.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded
    // kernargs, so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

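// Rewrite every use of an AMDGPU_KERNEL argument as a load from an explicit
// offset off the kernarg segment base pointer. Returns true if the function
// was changed.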
static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

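  // Materialize the kernarg segment base pointer once; every lowered argument
  // access below is an offset from this value.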
  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

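    // EltOffset is this argument's byte offset from the kernarg segment base
    // pointer (including BaseOffset); ExplicitArgOffset tracks the running
    // end of the explicit argument block for the next iteration.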
    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;

    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !ST.needsKernargPreloadBackwardsCompatibility() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

    InPreloadSequence = false;

    if (Arg.use_empty())
      continue;

    // If this is byref, the loads are already explicit in the function. We
    // just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }


    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

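    // Sub-dword scalar arguments are loaded as an aligned-down i32 and the
    // relevant bits extracted (DoShiftOpt); 3-element vectors of at least 32
    // bits are loaded as 4-element vectors and shuffled back down (IsV3).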
    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      // TODO: Update this for GFX12 which does have scalar sub-dword loads.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      AdjustedArgTy = V4Ty;
    }

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
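    // Kernarg memory is never written during the kernel's execution, so the
    // load can be marked invariant.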
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = lowerKernelArguments(F, TM);
  if (Changed) {
    // TODO: Preserves a lot more.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}