//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARMTargetMachine.h"
#include "ARM.h"
#include "ARMFrameLowering.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                   cl::desc("Inhibit optimization of S->D register accesses on A15"),
                   cl::init(false));

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
  RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);
}


/// TargetMachine ctor - Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
  : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
    Subtarget(TT, CPU, FS, Options),
    JITInfo(),
    InstrItins(Subtarget.getInstrItineraryData()) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
      Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;
}

void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
  // Add first the target-independent BasicTTI pass, then our ARM pass. This
  // allows the ARM pass to delegate to the target independent layer when
  // appropriate.
  PM.add(createBasicTargetTransformInfoPass(this));
  PM.add(createARMTargetTransformInfoPass(this));
}


void ARMTargetMachine::anchor() { }

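// For illustration only (an assumed example configuration): on a little-endian
// Thumb2 AAPCS ELF target, the function below would produce a string along the
// lines of
//   "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
// combining the ELF mangling component, 32-bit pointers, the Thumb integer
// alignments, the non-APCS i64 and vector alignments, 32-bit integer registers
// and the 64-bit AAPCS stack alignment.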
static std::string computeDataLayout(ARMSubtarget &ST) {
  // Little endian.
  std::string Ret = "e";

  Ret += DataLayout::getManglingComponent(ST.getTargetTriple());

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // On Thumb, i1, i8 and i16 have natural alignment requirements, but we try
  // to align them to 32 bits.
  if (ST.isThumb())
    Ret += "-i1:8:32-i8:8:32-i16:16:32";

  // ABIs other than APCS have 64-bit integers with natural alignment.
  if (!ST.isAPCS_ABI())
    Ret += "-i64:64";

  // We have 64-bit floats. The APCS ABI requires them to be aligned to 32
  // bits, other ABIs to 64 bits. We always try to align to 64 bits.
  if (ST.isAPCS_ABI())
    Ret += "-f64:32:64";

  // We have 64- and 128-bit vectors. The APCS ABI aligns them to 32 bits,
  // other ABIs to 64 bits. We always try to give them natural alignment.
  if (ST.isAPCS_ABI())
    Ret += "-v64:32:64-v128:32:128";
  else
    Ret += "-v128:64:128";

  // On Thumb and APCS, only try to align aggregates to 32 bits (the default is
  // 64 bits).
  if (ST.isThumb() || ST.isAPCS_ABI())
    Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128-bit aligned on NaCl, 64-bit aligned on AAPCS and 32-bit
  // aligned everywhere else.
  if (ST.isTargetNaCl())
    Ret += "-S128";
  else if (ST.isAAPCS_ABI())
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Reloc::Model RM, CodeModel::Model CM,
                                   CodeGenOpt::Level OL)
  : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
    InstrInfo(Subtarget),
    DL(computeDataLayout(Subtarget)),
    TLInfo(*this),
    TSInfo(*this),
    FrameLowering(Subtarget) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

void ThumbTargetMachine::anchor() { }

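// Illustrative note: hasThumb2() selects Thumb2InstrInfo and the standard
// ARMFrameLowering for Thumb2-capable subtargets (for example, a Cortex-A9
// class core), while Thumb1-only subtargets (for example, a Cortex-M0 class
// core) get the Thumb1 variants.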
ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
  : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
    InstrInfo(Subtarget.hasThumb2()
              ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
              : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
    DL(computeDataLayout(Subtarget)),
    TLInfo(*this),
    TSInfo(*this),
    FrameLowering(Subtarget.hasThumb2()
                  ? new ARMFrameLowering(Subtarget)
                  : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
  initAsmInfo();
}

namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  const ARMSubtarget &getARMSubtarget() const {
    return *getARMTargetMachine().getSubtargetImpl();
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  bool addPreRegAlloc() override;
  bool addPreSched2() override;
  bool addPreEmitPass() override;
};
} // namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

bool ARMPassConfig::addPreISel() {
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createGlobalMergePass(TM));

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));

  const ARMSubtarget *Subtarget = &getARMSubtarget();
  if (Subtarget->isTargetELF() && !Subtarget->isThumb1Only() &&
      TM->Options.EnableFastISel)
    addPass(createARMGlobalBaseRegPass());
  return false;
}

bool ARMPassConfig::addPreRegAlloc() {
  // FIXME: temporarily disabling load / store optimization pass for Thumb1.
  if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
    addPass(createARMLoadStoreOptimizationPass(true));
  if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
    addPass(createMLxExpansionPass());
  // Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
  // enabled when NEON is available.
  if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA15() &&
      getARMSubtarget().hasNEON() && !DisableA15SDOptimization) {
    addPass(createA15SDOptimizerPass());
  }
  return true;
}

bool ARMPassConfig::addPreSched2() {
  // FIXME: temporarily disabling load / store optimization pass for Thumb1.
  if (getOptLevel() != CodeGenOpt::None) {
    if (!getARMSubtarget().isThumb1Only()) {
      addPass(createARMLoadStoreOptimizationPass());
      printAndVerify("After ARM load / store optimizer");
    }
    if (getARMSubtarget().hasNEON())
      addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    if (!getARMSubtarget().isThumb1Only()) {
      // In v8, IfConversion depends on Thumb instruction widths.
      if (getARMSubtarget().restrictIT() &&
          !getARMSubtarget().prefers32BitThumb())
        addPass(createThumb2SizeReductionPass());
      addPass(&IfConverterID);
    }
  }
  if (getARMSubtarget().isThumb2())
    addPass(createThumb2ITBlockPass());

  return true;
}

bool ARMPassConfig::addPreEmitPass() {
  if (getARMSubtarget().isThumb2()) {
    if (!getARMSubtarget().prefers32BitThumb())
      addPass(createThumb2SizeReductionPass());

    // The constant island pass works on unbundled instructions.
    addPass(&UnpackMachineBundlesID);
  }

  addPass(createARMConstantIslandPass());

  return true;
}

bool ARMBaseTargetMachine::addCodeEmitter(PassManagerBase &PM,
                                          JITCodeEmitter &JCE) {
  // Machine code emitter pass for ARM.
  PM.add(createARMJITCodeEmitterPass(*this, JCE));
  return false;
}