//===-- NVPTXTargetMachine.cpp - Define TargetMachine for NVPTX -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Top-level implementation for the NVPTX target.
//
//===----------------------------------------------------------------------===//

#include "NVPTXTargetMachine.h"
#include "MCTargetDesc/NVPTXMCAsmInfo.h"
#include "NVPTX.h"
#include "NVPTXAllocaHoisting.h"
#include "NVPTXLowerAggrCopies.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXTargetTransformInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"

using namespace llvm;

// Hidden flag selecting which of the two address-space optimization passes
// runs in addAddressSpaceInferencePasses() below. Defaults to the legacy
// NVPTXFavorNonGenericAddrSpaces pass.
static cl::opt<bool> UseInferAddressSpaces(
    "nvptx-use-infer-addrspace", cl::init(false), cl::Hidden,
    cl::desc("Optimize address spaces using NVPTXInferAddressSpaces instead of "
             "NVPTXFavorNonGenericAddrSpaces"));

// Forward-declare the pass initializers so LLVMInitializeNVPTXTarget() can
// register them without pulling in each pass's header.
namespace llvm {
void initializeNVVMReflectPass(PassRegistry&);
void initializeGenericToNVVMPass(PassRegistry&);
void initializeNVPTXAllocaHoistingPass(PassRegistry &);
void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
void initializeNVPTXLowerKernelArgsPass(PassRegistry &);
void initializeNVPTXLowerAllocaPass(PassRegistry &);
}

// Entry point called by the target registry: registers the 32- and 64-bit
// NVPTX target machines and initializes all NVPTX-specific IR passes.
extern "C" void LLVMInitializeNVPTXTarget() {
  // Register the target.
  RegisterTargetMachine<NVPTXTargetMachine32> X(TheNVPTXTarget32);
  RegisterTargetMachine<NVPTXTargetMachine64> Y(TheNVPTXTarget64);

  // FIXME: This pass is really intended to be invoked during IR optimization,
  // but it's very NVPTX-specific.
  PassRegistry &PR = *PassRegistry::getPassRegistry();
  initializeNVVMReflectPass(PR);
  initializeGenericToNVVMPass(PR);
  initializeNVPTXAllocaHoistingPass(PR);
  initializeNVPTXAssignValidGlobalNamesPass(PR);
  initializeNVPTXFavorNonGenericAddrSpacesPass(PR);
  initializeNVPTXInferAddressSpacesPass(PR);
  initializeNVPTXLowerKernelArgsPass(PR);
  initializeNVPTXLowerAllocaPass(PR);
  initializeNVPTXLowerAggrCopiesPass(PR);
}

// Build the DataLayout string for the module: little-endian ("e"); 32-bit
// pointers on 32-bit targets (64-bit pointer layout is the default
// otherwise); 64-bit-aligned i64; v16/v32 vector alignment; native integer
// widths of 16, 32, and 64 bits.
static std::string computeDataLayout(bool is64Bit) {
  std::string Ret = "e";

  if (!is64Bit)
    Ret += "-p:32:32";

  Ret += "-i64:64-v16:16-v32:32-n16:32:64";

  return Ret;
}

NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool is64bit)
    : LLVMTargetMachine(T, computeDataLayout(is64bit), TT, CPU, FS, Options, RM,
                        CM, OL),
      is64bit(is64bit), TLOF(make_unique<NVPTXTargetObjectFile>()),
      Subtarget(TT, CPU, FS, *this) {
  // Pick the driver interface from the OS component of the triple: NVCL
  // (OpenCL) triples use the NVCL interface, everything else defaults to CUDA.
  if (TT.getOS() == Triple::NVCL)
    drvInterface = NVPTX::NVCL;
  else
    drvInterface = NVPTX::CUDA;
  initAsmInfo();
}

NVPTXTargetMachine::~NVPTXTargetMachine() {}

void NVPTXTargetMachine32::anchor() {}

NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void NVPTXTargetMachine64::anchor() {}

NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

namespace {
// Configures the NVPTX codegen pass pipeline. NVPTX never performs register
// allocation (see createTargetRegisterAllocator below), so several standard
// passes are disabled or replaced in the overrides that follow.
class NVPTXPassConfig : public TargetPassConfig {
public:
  NVPTXPassConfig(NVPTXTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  NVPTXTargetMachine &getNVPTXTargetMachine() const {
    return getTM<NVPTXTargetMachine>();
  }

  void addIRPasses() override;
  bool addInstSelector() override;
  void addPostRegAlloc() override;
  void addMachineSSAOptimization() override;

  FunctionPass *createTargetRegisterAllocator(bool) override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;

private:
  // If the opt level is aggressive, add GVN; otherwise, add EarlyCSE. This
  // function is only called in opt mode.
  void addEarlyCSEOrGVNPass();

  // Add passes that propagate special memory spaces.
  void addAddressSpaceInferencePasses();

  // Add passes that perform straight-line scalar optimizations.
  void addStraightLineScalarOptimizationPasses();
};
} // end anonymous namespace

TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new NVPTXPassConfig(this, PM);
}

// Hook for front ends to schedule NVVMReflect as early as possible in their
// own IR optimization pipeline; addIRPasses() also runs it for correctness.
void NVPTXTargetMachine::addEarlyAsPossiblePasses(PassManagerBase &PM) {
  PM.add(createNVVMReflectPass());
}

TargetIRAnalysis NVPTXTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(NVPTXTTIImpl(this, F));
  });
}

void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void NVPTXPassConfig::addAddressSpaceInferencePasses() {
  // NVPTXLowerKernelArgs emits alloca for byval parameters which can often
  // be eliminated by SROA.
  addPass(createSROAPass());
  addPass(createNVPTXLowerAllocaPass());
  // Choose between the two address-space optimizers based on the
  // -nvptx-use-infer-addrspace flag defined at the top of this file.
  if (UseInferAddressSpaces) {
    addPass(createNVPTXInferAddressSpacesPass());
  } else {
    addPass(createNVPTXFavorNonGenericAddrSpacesPass());
    // FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts, and leave
    // them unused. We could remove dead code in an ad-hoc manner, but that
    // requires manual work and might be error-prone.
    addPass(createDeadCodeEliminationPass());
  }
}

void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunites for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
  // EarlyCSE can reuse. GVN generates significantly better code than EarlyCSE
  // for some of our benchmarks.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void NVPTXPassConfig::addIRPasses() {
  // The following passes are known to not play well with virtual regs hanging
  // around after register allocation (which in our case, is *all* registers).
  // We explicitly disable them here. We do, however, need some functionality
  // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
  // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
  disablePass(&PrologEpilogCodeInserterID);
  disablePass(&MachineCopyPropagationID);
  disablePass(&TailDuplicateID);
  disablePass(&StackMapLivenessID);
  disablePass(&LiveDebugValuesID);
  disablePass(&PostRASchedulerID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  // NVVMReflectPass is added in addEarlyAsPossiblePasses, so hopefully running
  // it here does nothing. But since we need it for correctness when lowering
  // to NVPTX, run it here too, in case whoever built our pass pipeline didn't
  // call addEarlyAsPossiblePasses.
  addPass(createNVVMReflectPass());

  if (getOptLevel() != CodeGenOpt::None)
    addPass(createNVPTXImageOptimizerPass());
  addPass(createNVPTXAssignValidGlobalNamesPass());
  addPass(createGenericToNVVMPass());

  // NVPTXLowerKernelArgs is required for correctness and should be run right
  // before the address space inference passes.
  addPass(createNVPTXLowerKernelArgsPass(&getNVPTXTargetMachine()));
  if (getOptLevel() != CodeGenOpt::None) {
    addAddressSpaceInferencePasses();
    addStraightLineScalarOptimizationPasses();
  }

  // === LSR and other generic IR passes ===
  TargetPassConfig::addIRPasses();
  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

bool NVPTXPassConfig::addInstSelector() {
  const NVPTXSubtarget &ST = *getTM<NVPTXTargetMachine>().getSubtargetImpl();

  addPass(createLowerAggrCopies());
  addPass(createAllocaHoisting());
  addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));

  // NOTE(review): presumably this rewrites image accesses for subtargets
  // without native image-handle support — confirm against the pass itself.
  if (!ST.hasImageHandles())
    addPass(createNVPTXReplaceImageHandlesPass());

  return false;
}

void NVPTXPassConfig::addPostRegAlloc() {
  addPass(createNVPTXPrologEpilogPass(), false);
  if (getOptLevel() != CodeGenOpt::None) {
    // NVPTXPrologEpilogPass calculates frame object offset and replace frame
    // index with VRFrame register. NVPTXPeephole need to be run after that and
    // will replace VRFrame with VRFrameLocal when possible.
    addPass(createNVPTXPeephole());
  }
}

FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
  return nullptr; // No reg alloc
}

void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");
  addPass(&PHIEliminationID);
  addPass(&TwoAddressInstructionPassID);
}

void NVPTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");

  // Even without a register allocator, the passes that normally run before
  // regalloc (liveness, PHI elimination, coalescing) are still needed.
  addPass(&ProcessImplicitDefsID);
  addPass(&LiveVariablesID);
  addPass(&MachineLoopInfoID);
  addPass(&PHIEliminationID);

  addPass(&TwoAddressInstructionPassID);
  addPass(&RegisterCoalescerID);

  // PreRA instruction scheduling.
  if (addPass(&MachineSchedulerID))
    printAndVerify("After Machine Scheduling");

  addPass(&StackSlotColoringID);

  // FIXME: Needs physical registers
  //addPass(&PostRAMachineLICMID);

  printAndVerify("After StackSlotColoring");
}

void NVPTXPassConfig::addMachineSSAOptimization() {
  // Pre-ra tail duplication.
  if (addPass(&EarlyTailDuplicateID))
    printAndVerify("After Pre-RegAlloc TailDuplicate");

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(&OptimizePHIsID);

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(&StackColoringID);

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(&LocalStackSlotAllocationID);

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(&DeadMachineInstructionElimID);
  printAndVerify("After codegen DCE pass");

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  if (addILPOpts())
    printAndVerify("After ILP optimizations");

  addPass(&MachineLICMID);
  addPass(&MachineCSEID);

  addPass(&MachineSinkingID);
  printAndVerify("After Machine LICM, CSE and Sinking passes");

  addPass(&PeepholeOptimizerID);
  printAndVerify("After codegen peephole optimization pass");
}