//===-- X86TargetMachine.cpp - Define TargetMachine for the X86 -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "X86TargetMachine.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86.h"
#include "X86CallLowering.h"
#include "X86LegalizerInfo.h"
#include "X86MacroFusion.h"
#include "X86Subtarget.h"
#include "X86TargetObjectFile.h"
#include "X86TargetTransformInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExecutionDomainFix.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableMachineCombinerPass("x86-machine-combiner",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

namespace llvm {

void initializeWinEHStatePassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeShadowCallStackPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);
void initializeX86CmovConverterPassPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);

} // end namespace llvm

extern "C" void LLVMInitializeX86Target() {
  // Register the target.
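  // Both the 32-bit and 64-bit targets are backed by the same
  // X86TargetMachine.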
  RegisterTargetMachine<X86TargetMachine> X(getTheX86_32Target());
  RegisterTargetMachine<X86TargetMachine> Y(getTheX86_64Target());

  PassRegistry &PR = *PassRegistry::getPassRegistry();
  initializeGlobalISel(PR);
  initializeWinEHStatePassPass(PR);
  initializeFixupBWInstPassPass(PR);
  initializeEvexToVexInstPassPass(PR);
  initializeFixupLEAPassPass(PR);
  initializeShadowCallStackPass(PR);
  initializeX86CallFrameOptimizationPass(PR);
  initializeX86CmovConverterPassPass(PR);
  initializeX86ExecutionDomainFixPass(PR);
  initializeX86DomainReassignmentPass(PR);
  initializeX86AvoidSFBPassPass(PR);
  initializeX86FlagsCopyLoweringPassPass(PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::x86_64)
      return llvm::make_unique<X86_64MachoTargetObjectFile>();
    return llvm::make_unique<TargetLoweringObjectFileMachO>();
  }

  if (TT.isOSFreeBSD())
    return llvm::make_unique<X86FreeBSDTargetObjectFile>();
  if (TT.isOSLinux() || TT.isOSNaCl() || TT.isOSIAMCU())
    return llvm::make_unique<X86LinuxNaClTargetObjectFile>();
  if (TT.isOSSolaris())
    return llvm::make_unique<X86SolarisTargetObjectFile>();
  if (TT.isOSFuchsia())
    return llvm::make_unique<X86FuchsiaTargetObjectFile>();
  if (TT.isOSBinFormatELF())
    return llvm::make_unique<X86ELFTargetObjectFile>();
  if (TT.isKnownWindowsMSVCEnvironment() || TT.isWindowsCoreCLREnvironment())
    return llvm::make_unique<X86WindowsTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return llvm::make_unique<TargetLoweringObjectFileCOFF>();
  llvm_unreachable("unknown subtarget type");
}

static std::string computeDataLayout(const Triple &TT) {
  // X86 is little endian.
  std::string Ret = "e";

  Ret += DataLayout::getManglingComponent(TT);
  // X86 and x32 have 32 bit pointers.
  if ((TT.isArch64Bit() &&
       (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
      !TT.isArch64Bit())
    Ret += "-p:32:32";

  // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
  if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
    Ret += "-i64:64";
  else if (TT.isOSIAMCU())
    Ret += "-i64:32-f64:32";
  else
    Ret += "-f64:32:64";

  // Some ABIs align long double to 128 bits, others to 32.
  if (TT.isOSNaCl() || TT.isOSIAMCU())
    ; // No f80
  else if (TT.isArch64Bit() || TT.isOSDarwin())
    Ret += "-f80:128";
  else
    Ret += "-f80:32";

  if (TT.isOSIAMCU())
    Ret += "-f128:32";

  // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
  if (TT.isArch64Bit())
    Ret += "-n8:16:32:64";
  else
    Ret += "-n8:16:32";

  // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
  if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
    Ret += "-a:0:32-S32";
  else
    Ret += "-S128";
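  // For example, a typical x86-64 Linux ELF triple composes the pieces above
  // into "e-m:e-i64:64-f80:128-n8:16:32:64-S128".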
  return Ret;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  bool is64Bit = TT.getArch() == Triple::x86_64;
  if (!RM.hasValue()) {
    // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
    // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
    // use the static relocation model by default.
    if (TT.isOSDarwin()) {
      if (is64Bit)
        return Reloc::PIC_;
      return Reloc::DynamicNoPIC;
    }
    if (TT.isOSWindows() && is64Bit)
      return Reloc::PIC_;
    return Reloc::Static;
  }

  // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
  // is defined as a model for code which may be used in static or dynamic
  // executables but not necessarily a shared library. On X86-32 we just
  // compile in -static mode, on X86-64 we use PIC.
  if (*RM == Reloc::DynamicNoPIC) {
    if (is64Bit)
      return Reloc::PIC_;
    if (!TT.isOSDarwin())
      return Reloc::Static;
  }

  // If we are on Darwin, disallow the static relocation model in X86-64 mode,
  // since the Mach-O file format doesn't support it.
  if (*RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
    return Reloc::PIC_;

  return *RM;
}

static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
                                              bool JIT, bool Is64Bit) {
  if (CM)
    return *CM;
  if (JIT)
    return Is64Bit ? CodeModel::Large : CodeModel::Small;
  return CodeModel::Small;
}

/// Create an X86 target.
///
X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : LLVMTargetMachine(
          T, computeDataLayout(TT), TT, CPU, FS, Options,
          getEffectiveRelocModel(TT, RM),
          getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL),
      TLOF(createTLOF(getTargetTriple())) {
  // The Windows stack unwinder gets confused when execution flow "falls
  // through" after a call to a 'noreturn' function.
  // To prevent that, we emit a trap for 'unreachable' IR instructions
  // (which on X86 happens to be the 'ud2' instruction).
  // On PS4, the "return address" of a 'noreturn' call must still be within
  // the calling function, and TrapUnreachable is an easy way to get that.
  // The check here for 64-bit Windows is a bit icky, but as we're unlikely
  // to ever want to mix 32- and 64-bit Windows code in a single module,
  // this should be fine.
  if ((TT.isOSWindows() && TT.getArch() == Triple::x86_64) || TT.isPS4())
    this->Options.TrapUnreachable = true;

  initAsmInfo();
}

X86TargetMachine::~X86TargetMachine() = default;

const X86Subtarget *
X86TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  StringRef CPU = !CPUAttr.hasAttribute(Attribute::None)
                      ? CPUAttr.getValueAsString()
                      : (StringRef)TargetCPU;
  StringRef FS = !FSAttr.hasAttribute(Attribute::None)
                     ? FSAttr.getValueAsString()
                     : (StringRef)TargetFS;

  SmallString<512> Key;
  Key.reserve(CPU.size() + FS.size());
  Key += CPU;
  Key += FS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    Key += FS.empty() ? "+soft-float" : ",+soft-float";

  // Keep track of the key width after all features are added so we can extract
  // the feature string out later.
  unsigned CPUFSWidth = Key.size();

  // Extract prefer-vector-width attribute.
  unsigned PreferVectorWidthOverride = 0;
  if (F.hasFnAttribute("prefer-vector-width")) {
    StringRef Val = F.getFnAttribute("prefer-vector-width").getValueAsString();
    unsigned Width;
    if (!Val.getAsInteger(0, Width)) {
      Key += ",prefer-vector-width=";
      Key += Val;
      PreferVectorWidthOverride = Width;
    }
  }

  // Extract required-vector-width attribute.
  unsigned RequiredVectorWidth = UINT32_MAX;
  if (F.hasFnAttribute("required-vector-width")) {
    StringRef Val =
        F.getFnAttribute("required-vector-width").getValueAsString();
    unsigned Width;
    if (!Val.getAsInteger(0, Width)) {
      Key += ",required-vector-width=";
      Key += Val;
      RequiredVectorWidth = Width;
    }
  }

  // Extracted here so that we make sure there is backing for the StringRef. If
  // we assigned earlier, it's possible the SmallString reallocated, leaving a
  // dangling StringRef.
  FS = Key.slice(CPU.size(), CPUFSWidth);

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<X86Subtarget>(TargetTriple, CPU, FS, *this,
                                        Options.StackAlignmentOverride,
                                        PreferVectorWidthOverride,
                                        RequiredVectorWidth);
  }
  return I.get();
}

//===----------------------------------------------------------------------===//
// Command line options for x86
//===----------------------------------------------------------------------===//
static cl::opt<bool>
    UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
                  cl::desc("Minimize AVX to SSE transition penalty"),
                  cl::init(true));

//===----------------------------------------------------------------------===//
// X86 TTI query.
//===----------------------------------------------------------------------===//

TargetTransformInfo
X86TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(X86TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//

namespace {

/// X86 Code Generator Pass Configuration Options.
class X86PassConfig : public TargetPassConfig {
public:
  X86PassConfig(X86TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  X86TargetMachine &getX86TargetMachine() const {
    return getTM<X86TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createX86MacroFusionDAGMutation());
    return DAG;
  }

  void addIRPasses() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;
  void addPreSched2() override;
};

class X86ExecutionDomainFix : public ExecutionDomainFix {
public:
  static char ID;
  X86ExecutionDomainFix() : ExecutionDomainFix(ID, X86::VR128XRegClass) {}
  StringRef getPassName() const override {
    return "X86 Execution Dependency Fix";
  }
};
char X86ExecutionDomainFix::ID;

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86ExecutionDomainFix, "x86-execution-domain-fix",
                      "X86 Execution Domain Fix", false, false)
INITIALIZE_PASS_DEPENDENCY(ReachingDefAnalysis)
INITIALIZE_PASS_END(X86ExecutionDomainFix, "x86-execution-domain-fix",
                    "X86 Execution Domain Fix", false, false)

TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new X86PassConfig(*this, PM);
}

void X86PassConfig::addIRPasses() {
  addPass(createAtomicExpandPass());

  TargetPassConfig::addIRPasses();

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());

  // Add passes that handle indirect branch removal and insertion of a
  // retpoline thunk. These will be a no-op unless a function subtarget has
  // the retpoline feature enabled.
  addPass(createIndirectBrExpandPass());
}

bool X86PassConfig::addInstSelector() {
  // Install an instruction selector.
  addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses.
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createCleanupLocalDynamicTLSPass());

  addPass(createX86GlobalBaseRegPass());
  return false;
}

bool X86PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool X86PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool X86PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool X86PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

bool X86PassConfig::addILPOpts() {
  addPass(&EarlyIfConverterID);
  if (EnableMachineCombinerPass)
    addPass(&MachineCombinerID);
  addPass(createX86CmovConverterPass());
  return true;
}

bool X86PassConfig::addPreISel() {
  // Only add this pass for 32-bit x86 Windows.
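  // (The WinEHState pass sets up the per-function exception handling state
  // that the 32-bit Windows C++/SEH runtime expects.)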
  const Triple &TT = TM->getTargetTriple();
  if (TT.isOSWindows() && TT.getArch() == Triple::x86)
    addPass(createX86WinEHStatePass());
  return true;
}

void X86PassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(&LiveRangeShrinkID);
    addPass(createX86FixupSetCC());
    addPass(createX86OptimizeLEAs());
    addPass(createX86CallFrameOptimization());
    addPass(createX86AvoidStoreForwardingBlocks());
  }

  addPass(createX86FlagsCopyLoweringPass());
  addPass(createX86WinAllocaExpander());
}

void X86PassConfig::addMachineSSAOptimization() {
  addPass(createX86DomainReassignmentPass());
  TargetPassConfig::addMachineSSAOptimization();
}

void X86PassConfig::addPostRegAlloc() {
  addPass(createX86FloatingPointStackifierPass());
}

void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }

void X86PassConfig::addPreEmitPass() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(new X86ExecutionDomainFix());
    addPass(createBreakFalseDeps());
  }

  addPass(createShadowCallStackPass());
  addPass(createX86IndirectBranchTrackingPass());

  if (UseVZeroUpper)
    addPass(createX86IssueVZeroUpperPass());

  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createX86FixupBWInsts());
    addPass(createX86PadShortFunctions());
    addPass(createX86FixupLEAs());
    addPass(createX86EvexToVexInsts());
  }
}

void X86PassConfig::addPreEmitPass2() {
  addPass(createX86RetpolineThunksPass());
}