//===-- X86TargetMachine.cpp - Define TargetMachine for the X86 ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "X86TargetMachine.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86.h"
#include "X86CallLowering.h"
#include "X86LegalizerInfo.h"
#include "X86MacroFusion.h"
#include "X86Subtarget.h"
#include "X86TargetObjectFile.h"
#include "X86TargetTransformInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExecutionDomainFix.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableMachineCombinerPass("x86-machine-combiner",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableSpeculativeLoadHardening(
    "x86-speculative-load-hardening",
    cl::desc("Enable speculative load hardening"), cl::init(false), cl::Hidden);

namespace llvm {

void initializeWinEHStatePassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
void initializeShadowCallStackPass(PassRegistry &);
void initializeX86CallFrameOptimizationPass(PassRegistry &);
void initializeX86CmovConverterPassPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86AvoidSFBPassPass(PassRegistry &);
void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);

} // end namespace llvm

extern "C" void LLVMInitializeX86Target() {
  // Register the target.
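  // Both the 32-bit and 64-bit targets are backed by the same
  // X86TargetMachine class; the triple supplied at construction time selects
  // the subtarget behaviour.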
  RegisterTargetMachine<X86TargetMachine> X(getTheX86_32Target());
  RegisterTargetMachine<X86TargetMachine> Y(getTheX86_64Target());

  PassRegistry &PR = *PassRegistry::getPassRegistry();
  initializeGlobalISel(PR);
  initializeWinEHStatePassPass(PR);
  initializeFixupBWInstPassPass(PR);
  initializeEvexToVexInstPassPass(PR);
  initializeFixupLEAPassPass(PR);
  initializeShadowCallStackPass(PR);
  initializeX86CallFrameOptimizationPass(PR);
  initializeX86CmovConverterPassPass(PR);
  initializeX86ExecutionDomainFixPass(PR);
  initializeX86DomainReassignmentPass(PR);
  initializeX86AvoidSFBPassPass(PR);
  initializeX86SpeculativeLoadHardeningPassPass(PR);
  initializeX86FlagsCopyLoweringPassPass(PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::x86_64)
      return llvm::make_unique<X86_64MachoTargetObjectFile>();
    return llvm::make_unique<TargetLoweringObjectFileMachO>();
  }

  if (TT.isOSFreeBSD())
    return llvm::make_unique<X86FreeBSDTargetObjectFile>();
  if (TT.isOSLinux() || TT.isOSNaCl() || TT.isOSIAMCU())
    return llvm::make_unique<X86LinuxNaClTargetObjectFile>();
  if (TT.isOSSolaris())
    return llvm::make_unique<X86SolarisTargetObjectFile>();
  if (TT.isOSFuchsia())
    return llvm::make_unique<X86FuchsiaTargetObjectFile>();
  if (TT.isOSBinFormatELF())
    return llvm::make_unique<X86ELFTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return llvm::make_unique<TargetLoweringObjectFileCOFF>();
  llvm_unreachable("unknown subtarget type");
}

static std::string computeDataLayout(const Triple &TT) {
  // X86 is little endian
  std::string Ret = "e";

  Ret += DataLayout::getManglingComponent(TT);
  // X86 and x32 have 32 bit pointers.
  if ((TT.isArch64Bit() &&
       (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
      !TT.isArch64Bit())
    Ret += "-p:32:32";

  // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
  if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
    Ret += "-i64:64";
  else if (TT.isOSIAMCU())
    Ret += "-i64:32-f64:32";
  else
    Ret += "-f64:32:64";

  // Some ABIs align long double to 128 bits, others to 32.
  if (TT.isOSNaCl() || TT.isOSIAMCU())
    ; // No f80
  else if (TT.isArch64Bit() || TT.isOSDarwin())
    Ret += "-f80:128";
  else
    Ret += "-f80:32";

  if (TT.isOSIAMCU())
    Ret += "-f128:32";

  // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
  if (TT.isArch64Bit())
    Ret += "-n8:16:32:64";
  else
    Ret += "-n8:16:32";

  // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
  if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
    Ret += "-a:0:32-S32";
  else
    Ret += "-S128";

  return Ret;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           bool JIT,
                                           Optional<Reloc::Model> RM) {
  bool is64Bit = TT.getArch() == Triple::x86_64;
  if (!RM.hasValue()) {
    // JIT codegen should use static relocations by default, since it's
    // typically executed in process and not relocatable.
    if (JIT)
      return Reloc::Static;

    // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
    // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
    // use static relocation model by default.
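    // For example, with no explicit model an x86_64-apple-darwin triple ends
    // up with Reloc::PIC_, while i386-pc-linux-gnu falls through to
    // Reloc::Static below.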
    if (TT.isOSDarwin()) {
      if (is64Bit)
        return Reloc::PIC_;
      return Reloc::DynamicNoPIC;
    }
    if (TT.isOSWindows() && is64Bit)
      return Reloc::PIC_;
    return Reloc::Static;
  }

  // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
  // is defined as a model for code which may be used in static or dynamic
  // executables but not necessarily a shared library. On X86-32 we just
  // compile in -static mode, in x86-64 we use PIC.
  if (*RM == Reloc::DynamicNoPIC) {
    if (is64Bit)
      return Reloc::PIC_;
    if (!TT.isOSDarwin())
      return Reloc::Static;
  }

  // If we are on Darwin, disallow the static relocation model in X86-64 mode,
  // since the Mach-O file format doesn't support it.
  if (*RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
    return Reloc::PIC_;

  return *RM;
}

static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
                                              bool JIT, bool Is64Bit) {
  if (CM)
    return *CM;
  if (JIT)
    return Is64Bit ? CodeModel::Large : CodeModel::Small;
  return CodeModel::Small;
}

/// Create an X86 target.
///
X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : LLVMTargetMachine(
          T, computeDataLayout(TT), TT, CPU, FS, Options,
          getEffectiveRelocModel(TT, JIT, RM),
          getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL),
      TLOF(createTLOF(getTargetTriple())) {
  // The Windows stack unwinder gets confused when execution flow "falls
  // through" after a call to a 'noreturn' function. To prevent that, we emit
  // a trap for 'unreachable' IR instructions (which on X86 happens to be the
  // 'ud2' instruction).
  // On PS4, the "return address" of a 'noreturn' call must still be within
  // the calling function, and TrapUnreachable is an easy way to get that.
  // The check here for 64-bit Windows is a bit icky, but as we're unlikely
  // to ever want to mix 32-bit and 64-bit Windows code in a single module,
  // this should be fine.
  if ((TT.isOSWindows() && TT.getArch() == Triple::x86_64) || TT.isPS4() ||
      TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = TT.isOSBinFormatMachO();
  }

  // Outlining is available for x86-64.
  if (TT.getArch() == Triple::x86_64)
    setMachineOutliner(true);

  initAsmInfo();
}

X86TargetMachine::~X86TargetMachine() = default;

const X86Subtarget *
X86TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  StringRef CPU = !CPUAttr.hasAttribute(Attribute::None)
                      ? CPUAttr.getValueAsString()
                      : (StringRef)TargetCPU;
  StringRef FS = !FSAttr.hasAttribute(Attribute::None)
                     ? FSAttr.getValueAsString()
                     : (StringRef)TargetFS;

  SmallString<512> Key;
  Key.reserve(CPU.size() + FS.size());
  Key += CPU;
  Key += FS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
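  // The key built below has the form
  // <cpu><features>[,+soft-float][,prefer-vector-width=N][,required-vector-width=N];
  // the feature portion (including any soft-float suffix) is sliced back out
  // of it once the string is stable.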
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function, turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    Key += FS.empty() ? "+soft-float" : ",+soft-float";

  // Keep track of the key width after all features are added so we can extract
  // the feature string out later.
  unsigned CPUFSWidth = Key.size();

  // Extract prefer-vector-width attribute.
  unsigned PreferVectorWidthOverride = 0;
  if (F.hasFnAttribute("prefer-vector-width")) {
    StringRef Val = F.getFnAttribute("prefer-vector-width").getValueAsString();
    unsigned Width;
    if (!Val.getAsInteger(0, Width)) {
      Key += ",prefer-vector-width=";
      Key += Val;
      PreferVectorWidthOverride = Width;
    }
  }

  // Extract required-vector-width attribute.
  unsigned RequiredVectorWidth = UINT32_MAX;
  if (F.hasFnAttribute("required-vector-width")) {
    StringRef Val =
        F.getFnAttribute("required-vector-width").getValueAsString();
    unsigned Width;
    if (!Val.getAsInteger(0, Width)) {
      Key += ",required-vector-width=";
      Key += Val;
      RequiredVectorWidth = Width;
    }
  }

  // Extracted here so that we make sure there is backing for the StringRef. If
  // we assigned earlier, it's possible the SmallString reallocated, leaving a
  // dangling StringRef.
  FS = Key.slice(CPU.size(), CPUFSWidth);

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<X86Subtarget>(TargetTriple, CPU, FS, *this,
                                        Options.StackAlignmentOverride,
                                        PreferVectorWidthOverride,
                                        RequiredVectorWidth);
  }
  return I.get();
}

//===----------------------------------------------------------------------===//
// Command line options for x86
//===----------------------------------------------------------------------===//
static cl::opt<bool>
    UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
                  cl::desc("Minimize AVX to SSE transition penalty"),
                  cl::init(true));

//===----------------------------------------------------------------------===//
// X86 TTI query.
//===----------------------------------------------------------------------===//

TargetTransformInfo
X86TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(X86TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//

namespace {

/// X86 Code Generator Pass Configuration Options.
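/// Hooks the X86-specific IR, instruction-selection, and machine passes into
/// the generic codegen pipeline built by TargetPassConfig.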
class X86PassConfig : public TargetPassConfig {
public:
  X86PassConfig(X86TargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  X86TargetMachine &getX86TargetMachine() const {
    return getTM<X86TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createX86MacroFusionDAGMutation());
    return DAG;
  }

  void addIRPasses() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;
  void addPreSched2() override;
};

class X86ExecutionDomainFix : public ExecutionDomainFix {
public:
  static char ID;
  X86ExecutionDomainFix() : ExecutionDomainFix(ID, X86::VR128XRegClass) {}
  StringRef getPassName() const override {
    return "X86 Execution Dependency Fix";
  }
};
char X86ExecutionDomainFix::ID;

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(X86ExecutionDomainFix, "x86-execution-domain-fix",
                      "X86 Execution Domain Fix", false, false)
INITIALIZE_PASS_DEPENDENCY(ReachingDefAnalysis)
INITIALIZE_PASS_END(X86ExecutionDomainFix, "x86-execution-domain-fix",
                    "X86 Execution Domain Fix", false, false)

TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new X86PassConfig(*this, PM);
}

void X86PassConfig::addIRPasses() {
  addPass(createAtomicExpandPass());

  TargetPassConfig::addIRPasses();

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());

  // Add passes that handle indirect branch removal and insertion of a
  // retpoline thunk. These will be a no-op unless a function subtarget has
  // the retpoline feature enabled.
  addPass(createIndirectBrExpandPass());
}

bool X86PassConfig::addInstSelector() {
  // Install an instruction selector.
  addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses.
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createCleanupLocalDynamicTLSPass());

  addPass(createX86GlobalBaseRegPass());
  return false;
}

bool X86PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool X86PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool X86PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool X86PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

bool X86PassConfig::addILPOpts() {
  addPass(&EarlyIfConverterID);
  if (EnableMachineCombinerPass)
    addPass(&MachineCombinerID);
  addPass(createX86CmovConverterPass());
  return true;
}

bool X86PassConfig::addPreISel() {
  // Only add this pass for 32-bit x86 Windows.
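  // The WinEHState pass sets up the exception handling registration state
  // that 32-bit Windows SEH and C++ EH require.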
  const Triple &TT = TM->getTargetTriple();
  if (TT.isOSWindows() && TT.getArch() == Triple::x86)
    addPass(createX86WinEHStatePass());
  return true;
}

void X86PassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(&LiveRangeShrinkID);
    addPass(createX86FixupSetCC());
    addPass(createX86OptimizeLEAs());
    addPass(createX86CallFrameOptimization());
    addPass(createX86AvoidStoreForwardingBlocks());
  }

  if (EnableSpeculativeLoadHardening)
    addPass(createX86SpeculativeLoadHardeningPass());

  addPass(createX86FlagsCopyLoweringPass());
  addPass(createX86WinAllocaExpander());
}

void X86PassConfig::addMachineSSAOptimization() {
  addPass(createX86DomainReassignmentPass());
  TargetPassConfig::addMachineSSAOptimization();
}

void X86PassConfig::addPostRegAlloc() {
  addPass(createX86FloatingPointStackifierPass());
}

void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }

void X86PassConfig::addPreEmitPass() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(new X86ExecutionDomainFix());
    addPass(createBreakFalseDeps());
  }

  addPass(createShadowCallStackPass());
  addPass(createX86IndirectBranchTrackingPass());

  if (UseVZeroUpper)
    addPass(createX86IssueVZeroUpperPass());

  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createX86FixupBWInsts());
    addPass(createX86PadShortFunctions());
    addPass(createX86FixupLEAs());
    addPass(createX86EvexToVexInsts());
  }
}

void X86PassConfig::addPreEmitPass2() {
  addPass(createX86RetpolineThunksPass());
  // Verify basic block incoming and outgoing CFA offset and register values
  // and correct the CFA calculation rule where needed by inserting appropriate
  // CFI instructions.
  const Triple &TT = TM->getTargetTriple();
  if (!TT.isOSDarwin() && !TT.isOSWindows())
    addPass(createCFIInstrInserter());
}