//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF);
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // emit insert subreg instruction and insert it before MachineInstr &I
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // emit extract subreg instruction and insert it before MachineInstr &I
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
      RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
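// A COPY may still involve generic virtual registers at this point (e.g. the
// copies to/from physical registers created by ABI lowering). Besides picking
// the X86::COPY opcode, this may need to adjust a subregister index or insert
// a SUBREG_TO_REG when the two sides have different widths.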
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering, perform anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types, the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :
                       HasAVX    ? X86::VMOVSSrm_alt :
                                   X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr :
                       HasAVX    ? X86::VMOVSSmr :
                                   X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :
                       HasAVX    ? X86::VMOVSDrm_alt :
                                   X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr :
                       HasAVX    ? X86::VMOVSDmr :
                                   X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
                                          : HasAVX ? X86::VMOVAPSrm
                                                   : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
                                          : HasAVX ? X86::VMOVAPSmr
                                                   : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
                                          : HasAVX ? X86::VMOVUPSrm
                                                   : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
                                          : HasAVX ? X86::VMOVUPSmr
                                                   : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getIConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact the appropriate MMO
    // is already on the instruction we're mutating, and thus we don't need to
    // make any changes. So long as we select an opcode which is capable of
    // loading or storing the appropriate size atomically, the rest of the
    // backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX ||
          Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load. not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the pic base. not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
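// In that case the scalar value already lives in the low lane of an XMM
// register, so no instruction is needed and the operation can be selected as
// a plain COPY between the two register classes.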
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If that's truncation of the value that lives on the vector class and goes
  // into the floating class, just replace it with copy, as we are able to
  // select it as a regular move.
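  // E.g. a G_TRUNC from a 128-bit value on the vector bank to a 32- or 64-bit
  // scalar only reads the low lane, so constraining both sides to the
  // FR32/FR64 and VR128 classes and emitting a COPY is sufficient.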
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If that's ANY_EXT of the value that lives on the floating class and goes
  // into the vector class, just replace it with copy, as we are able to select
  // it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               I.getOperand(0).getReg())
           .addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  Register LhsReg = I.getOperand(2).getReg();
  Register RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
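  // UCOMISS/UCOMISD raise PF for unordered inputs, so "ordered and equal"
  // needs ZF set and PF clear, while "unordered or not equal" needs ZF clear
  // or PF set; each case is built from two SETcc results combined with
  // AND8rr/OR8rr via the table below.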
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::COND_E, X86::COND_NP, X86::AND8rr},
      {X86::COND_NE, X86::COND_P, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  Register ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg1)
                              .addImm(SETFOpc[0]);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(X86::SETCCr), FlagReg2)
                              .addImm(SETFOpc[1]);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr),
               ResultReg)
           .addImm(CC);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  Register CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // find CarryIn def instruction.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // carry set by prev ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
    // carry is constant, support only 0.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extract subvector.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
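  // The VEXTRACT* immediate counts in units of the destination width, not in
  // bits: extracting the upper 128-bit half of a 256-bit vector, or the upper
  // 256-bit half of a 512-bit vector, uses immediate 1.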
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not insert subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
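  // The remaining sources are then chained through newly created G_INSERT
  // instructions, each of which is immediately re-selected below.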
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB)
      .addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  Align Alignment = Align(DstTy.getSizeInBytes());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.

    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAG ISel the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO support the mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

bool X86InstructionSelector::selectDivRem(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  // The implementation of this function is taken from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 4;   // SDiv, SRem, UDiv, URem
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;
  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
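  // For example, a 32-bit signed division copies the dividend into EAX,
  // sign-extends it into EDX with CDQ, and issues IDIV32r; the quotient is
  // then read from EAX (G_SDIV) and the remainder from EDX (G_SREM).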
  const static struct DivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct DivRemResult {
      unsigned OpDivRem;        // The specific DIV/IDIV opcode to use.
      unsigned OpSignExtend;    // Opcode for sign-extending lowreg into
                                // highreg, or copying a zero into highreg.
      unsigned OpCopy;          // Opcode for copying dividend into lowreg, or
                                // zero/sign-extending into lowreg for i8.
      unsigned DivRemResultReg; // Register containing the desired result.
      bool IsOpSigned;          // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
       }},                                                // i8
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
       }},                                                // i16
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
       }},                                                 // i32
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
       }},                                                 // i64
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const DivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  }

  const DivRemEntry &TypeEntry = *OpEntryIt;
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);
  // Zero-extend or sign-extend into high-order input register.
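  // For unsigned operations OpSignExtend is MOV32r0: the zero is materialized
  // in a fresh 32-bit register and then moved into the high register with the
  // appropriate sub-/super-register copy (or SUBREG_TO_REG for i64) below.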
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }
  // Generate the DIV/IDIV instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
      .addReg(Op2Reg);
  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if ((I.getOpcode() == TargetOpcode::G_SREM ||
       I.getOpcode() == TargetOpcode::G_UREM) &&
      OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(DstReg)
        .addImm(0)
        .addReg(ResultSuperReg)
        .addImm(X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.DivRemResultReg);
  }
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectIntrinsicWSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {

  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "unexpected instruction");

  if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));

  I.eraseFromParent();
  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}