//===-- NVPTXTargetMachine.cpp - Define TargetMachine for NVPTX -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Top-level implementation for the NVPTX target.
//
//===----------------------------------------------------------------------===//

#include "NVPTXTargetMachine.h"
#include "NVPTX.h"
#include "NVPTXAllocaHoisting.h"
#include "NVPTXLowerAggrCopies.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXTargetTransformInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"
#include <cassert>
#include <string>

using namespace llvm;

// LSV is still relatively new; this switch lets us turn it off in case we
// encounter (or suspect) a bug.
static cl::opt<bool>
    DisableLoadStoreVectorizer("disable-nvptx-load-store-vectorizer",
                               cl::desc("Disable load/store vectorizer"),
                               cl::init(false), cl::Hidden);

// TODO: Remove this flag when we are confident with no regressions.
static cl::opt<bool> DisableRequireStructuredCFG(
    "disable-nvptx-require-structured-cfg",
    cl::desc("Transitional flag to turn off NVPTX's requirement on preserving "
             "structured CFG. The requirement should be disabled only when "
             "unexpected regressions happen."),
    cl::init(false), cl::Hidden);

namespace llvm {

// Forward declarations of the pass initializers defined in the individual
// pass implementation files; used by LLVMInitializeNVPTXTarget() below.
void initializeNVVMIntrRangePass(PassRegistry&);
void initializeNVVMReflectPass(PassRegistry&);
void initializeGenericToNVVMPass(PassRegistry&);
void initializeNVPTXAllocaHoistingPass(PassRegistry &);
void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
void initializeNVPTXLowerArgsPass(PassRegistry &);
void initializeNVPTXLowerAllocaPass(PassRegistry &);

} // end namespace llvm

// Entry point called by the target registry; registers both the 32- and
// 64-bit NVPTX target machines and the NVPTX-specific IR passes.
extern "C" void LLVMInitializeNVPTXTarget() {
  // Register the target.
  RegisterTargetMachine<NVPTXTargetMachine32> X(getTheNVPTXTarget32());
  RegisterTargetMachine<NVPTXTargetMachine64> Y(getTheNVPTXTarget64());

  // FIXME: This pass is really intended to be invoked during IR optimization,
  // but it's very NVPTX-specific.
  PassRegistry &PR = *PassRegistry::getPassRegistry();
  initializeNVVMReflectPass(PR);
  initializeNVVMIntrRangePass(PR);
  initializeGenericToNVVMPass(PR);
  initializeNVPTXAllocaHoistingPass(PR);
  initializeNVPTXAssignValidGlobalNamesPass(PR);
  initializeNVPTXLowerArgsPass(PR);
  initializeNVPTXLowerAllocaPass(PR);
  initializeNVPTXLowerAggrCopiesPass(PR);
}

// Build the data-layout string: little-endian ("e"), with 32-bit pointers on
// 32-bit targets (64-bit pointer layout is the default and needs no entry).
static std::string computeDataLayout(bool is64Bit) {
  std::string Ret = "e";

  if (!is64Bit)
    Ret += "-p:32:32";

  Ret += "-i64:64-i128:128-v16:16-v32:32-n16:32:64";

  return Ret;
}

// Fold an optional code model down to a concrete one; NVPTX defaults to
// CodeModel::Small when the client did not specify one.
static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) {
  if (CM)
    return *CM;
  return CodeModel::Small;
}

NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       Optional<CodeModel::Model> CM,
                                       CodeGenOpt::Level OL, bool is64bit)
    // The pic relocation model is used regardless of what the client has
    // specified, as it is the only relocation model currently supported.
    : LLVMTargetMachine(T, computeDataLayout(is64bit), TT, CPU, FS, Options,
                        Reloc::PIC_, getEffectiveCodeModel(CM), OL),
      is64bit(is64bit), TLOF(llvm::make_unique<NVPTXTargetObjectFile>()),
      Subtarget(TT, CPU, FS, *this) {
  // The OS component of the triple selects the driver interface: NVCL for
  // OpenCL-style drivers, CUDA otherwise.
  if (TT.getOS() == Triple::NVCL)
    drvInterface = NVPTX::NVCL;
  else
    drvInterface = NVPTX::CUDA;
  if (!DisableRequireStructuredCFG)
    setRequiresStructuredCFG(true);
  initAsmInfo();
}

NVPTXTargetMachine::~NVPTXTargetMachine() = default;

void NVPTXTargetMachine32::anchor() {}

// 32-bit variant: forwards to the common constructor with is64bit = false.
NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void NVPTXTargetMachine64::anchor() {}

// 64-bit variant: forwards to the common constructor with is64bit = true.
NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT)
    : NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

namespace {

// Customizes the standard codegen pipeline for NVPTX: adds NVPTX-specific IR
// lowering passes and suppresses register allocation (PTX uses virtual
// registers all the way through; see createTargetRegisterAllocator below).
class NVPTXPassConfig : public TargetPassConfig {
public:
  NVPTXPassConfig(NVPTXTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  NVPTXTargetMachine &getNVPTXTargetMachine() const {
    return getTM<NVPTXTargetMachine>();
  }

  void addIRPasses() override;
  bool addInstSelector() override;
  void addPostRegAlloc() override;
  void addMachineSSAOptimization() override;

  FunctionPass *createTargetRegisterAllocator(bool) override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;

private:
  // If the opt level is aggressive, add GVN; otherwise, add EarlyCSE. This
  // function is only called in opt mode.
  void addEarlyCSEOrGVNPass();

  // Add passes that propagate special memory spaces.
  void addAddressSpaceInferencePasses();

  // Add passes that perform straight-line scalar optimizations.
  void addStraightLineScalarOptimizationPasses();
};

} // end anonymous namespace

TargetPassConfig *NVPTXTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new NVPTXPassConfig(*this, PM);
}

// Hook NVPTX-specific IR passes into the generic optimizer pipeline as early
// as possible (NVVMReflect and NVVMIntrRange must run before general
// optimization; see the note in addIRPasses below).
void NVPTXTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.addExtension(
      PassManagerBuilder::EP_EarlyAsPossible,
      [&](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        PM.add(createNVVMReflectPass());
        PM.add(createNVVMIntrRangePass(Subtarget.getSmVersion()));
      });
}

TargetTransformInfo
NVPTXTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(NVPTXTTIImpl(this, F));
}

void NVPTXPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void NVPTXPassConfig::addAddressSpaceInferencePasses() {
  // NVPTXLowerArgs emits alloca for byval parameters which can often
  // be eliminated by SROA.
  addPass(createSROAPass());
  addPass(createNVPTXLowerAllocaPass());
  addPass(createInferAddressSpacesPass());
}

void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunites for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
  // EarlyCSE can reuse. GVN generates significantly better code than EarlyCSE
  // for some of our benchmarks.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void NVPTXPassConfig::addIRPasses() {
  // The following passes are known to not play well with virtual regs hanging
  // around after register allocation (which in our case, is *all* registers).
  // We explicitly disable them here. We do, however, need some functionality
  // of the PrologEpilogCodeInserter pass, so we emulate that behavior in the
  // NVPTXPrologEpilog pass (see NVPTXPrologEpilogPass.cpp).
  disablePass(&PrologEpilogCodeInserterID);
  disablePass(&MachineCopyPropagationID);
  disablePass(&TailDuplicateID);
  disablePass(&StackMapLivenessID);
  disablePass(&LiveDebugValuesID);
  disablePass(&PostRAMachineSinkingID);
  disablePass(&PostRASchedulerID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);
  disablePass(&ShrinkWrapID);

  // NVVMReflectPass is added in addEarlyAsPossiblePasses, so hopefully running
  // it here does nothing. But since we need it for correctness when lowering
  // to NVPTX, run it here too, in case whoever built our pass pipeline didn't
  // call addEarlyAsPossiblePasses.
  addPass(createNVVMReflectPass());

  if (getOptLevel() != CodeGenOpt::None)
    addPass(createNVPTXImageOptimizerPass());
  addPass(createNVPTXAssignValidGlobalNamesPass());
  addPass(createGenericToNVVMPass());

  // NVPTXLowerArgs is required for correctness and should be run right
  // before the address space inference passes.
  addPass(createNVPTXLowerArgsPass(&getNVPTXTargetMachine()));
  if (getOptLevel() != CodeGenOpt::None) {
    addAddressSpaceInferencePasses();
    if (!DisableLoadStoreVectorizer)
      addPass(createLoadStoreVectorizerPass());
    addStraightLineScalarOptimizationPasses();
  }

  // === LSR and other generic IR passes ===
  TargetPassConfig::addIRPasses();
  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

bool NVPTXPassConfig::addInstSelector() {
  const NVPTXSubtarget &ST = *getTM<NVPTXTargetMachine>().getSubtargetImpl();

  // Lower aggregate copies and hoist allocas before instruction selection.
  addPass(createLowerAggrCopies());
  addPass(createAllocaHoisting());
  addPass(createNVPTXISelDag(getNVPTXTargetMachine(), getOptLevel()));

  if (!ST.hasImageHandles())
    addPass(createNVPTXReplaceImageHandlesPass());

  return false;
}

void NVPTXPassConfig::addPostRegAlloc() {
  addPass(createNVPTXPrologEpilogPass(), false);
  if (getOptLevel() != CodeGenOpt::None) {
    // NVPTXPrologEpilogPass calculates frame object offset and replace frame
    // index with VRFrame register. NVPTXPeephole need to be run after that and
    // will replace VRFrame with VRFrameLocal when possible.
    addPass(createNVPTXPeephole());
  }
}

// PTX is emitted in terms of virtual registers, so NVPTX performs no register
// allocation; returning nullptr suppresses the allocator entirely.
FunctionPass *NVPTXPassConfig::createTargetRegisterAllocator(bool) {
  return nullptr; // No reg alloc
}

// Fast (-O0) path: with the allocator suppressed, run only the SSA-lowering
// passes that normally precede it.
void NVPTXPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");
  addPass(&PHIEliminationID);
  addPass(&TwoAddressInstructionPassID);
}

// Optimized path: reproduce the pre-regalloc portion of the standard pipeline
// (SSA lowering, coalescing, scheduling) without an actual allocator.
void NVPTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  assert(!RegAllocPass && "NVPTX uses no regalloc!");

  addPass(&ProcessImplicitDefsID);
  addPass(&LiveVariablesID);
  addPass(&MachineLoopInfoID);
  addPass(&PHIEliminationID);

  addPass(&TwoAddressInstructionPassID);
  addPass(&RegisterCoalescerID);

  // PreRA instruction scheduling.
  if (addPass(&MachineSchedulerID))
    printAndVerify("After Machine Scheduling");

  addPass(&StackSlotColoringID);

  // FIXME: Needs physical registers
  //addPass(&MachineLICMID);

  printAndVerify("After StackSlotColoring");
}

void NVPTXPassConfig::addMachineSSAOptimization() {
  // Pre-ra tail duplication.
  if (addPass(&EarlyTailDuplicateID))
    printAndVerify("After Pre-RegAlloc TailDuplicate");

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(&OptimizePHIsID);

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(&StackColoringID);

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(&LocalStackSlotAllocationID);

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(&DeadMachineInstructionElimID);
  printAndVerify("After codegen DCE pass");

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  if (addILPOpts())
    printAndVerify("After ILP optimizations");

  addPass(&EarlyMachineLICMID);
  addPass(&MachineCSEID);

  addPass(&MachineSinkingID);
  printAndVerify("After Machine LICM, CSE and Sinking passes");

  addPass(&PeepholeOptimizerID);
  printAndVerify("After codegen peephole optimization pass");
}