//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "X86AsmPrinter.h"
#include "X86RegisterInfo.h"
#include "X86ShuffleDecodeConstantPool.h"
#include "InstPrinter/X86ATTInstPrinter.h"
#include "InstPrinter/X86InstComments.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "Utils/X86ShuffleDecode.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ELF.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

using namespace llvm;

namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;

public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
                                          const MachineOperand &MO) const;
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace

// Emit a minimal sequence of nops spanning NumBytes bytes.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);

void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
                                                 const MCSubtargetInfo &STI,
                                                 MCCodeEmitter *CodeEmitter) {
  if (InShadow) {
    SmallString<256> Code;
    SmallVector<MCFixup, 4> Fixups;
    raw_svector_ostream VecOS(Code);
    CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
    CurrentShadowSize += Code.size();
    if (CurrentShadowSize >= RequiredShadowSize)
      InShadow = false; // The shadow is big enough. Stop counting.
  }
}
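// Called at points where the shadow must be complete (the next stackmap or
// patchpoint, or a call): emit nops for any bytes of the shadow not yet
// covered by real instructions, then close the shadow.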
void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
    MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
  if (InShadow && CurrentShadowSize < RequiredShadowSize) {
    InShadow = false;
    EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
             MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
  }
}

void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
  OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
  SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
}

X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
      AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}

/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
  const DataLayout &DL = MF.getDataLayout();
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  MCSymbol *Sym = nullptr;
  SmallString<128> Name;
  StringRef Suffix;

  switch (MO.getTargetFlags()) {
  case X86II::MO_DLLIMPORT:
    // Handle dllimport linkage.
    Name += "__imp_";
    break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Suffix = "$non_lazy_ptr";
    break;
  }

  if (!Suffix.empty())
    Name += DL.getPrivateGlobalPrefix();

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    AsmPrinter.getNameWithPrefix(Name, GV);
  } else if (MO.isSymbol()) {
    Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
  } else if (MO.isMBB()) {
    assert(Suffix.empty());
    Sym = MO.getMBB()->getSymbol();
  }

  Name += Suffix;
  if (!Sym)
    Sym = Ctx.getOrCreateSymbol(Name);

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default: break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
        getMachOMMI().getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
          MachineModuleInfoImpl::StubValueTy(AsmPrinter.getSymbol(MO.getGlobal()),
                                             !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  }

  return Sym;
}

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = nullptr;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default: llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG: // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
    break;

  case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(Expr,
                                   MCSymbolRefExpr::create(MF.getPICBaseSymbol(),
                                                           Ctx),
                                   Ctx);
    break;
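  // Each of the flags below lowers to a plain reference to Sym tagged with
  // the matching MC variant kind (printed as e.g. sym@GOTPCREL or sym@PLT in
  // AT&T syntax).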
  case X86II::MO_SECREL:    RefKind = MCSymbolRefExpr::VK_SECREL; break;
  case X86II::MO_TLSGD:     RefKind = MCSymbolRefExpr::VK_TLSGD; break;
  case X86II::MO_TLSLD:     RefKind = MCSymbolRefExpr::VK_TLSLD; break;
  case X86II::MO_TLSLDM:    RefKind = MCSymbolRefExpr::VK_TLSLDM; break;
  case X86II::MO_GOTTPOFF:  RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
  case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
  case X86II::MO_TPOFF:     RefKind = MCSymbolRefExpr::VK_TPOFF; break;
  case X86II::MO_DTPOFF:    RefKind = MCSymbolRefExpr::VK_DTPOFF; break;
  case X86II::MO_NTPOFF:    RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
  case X86II::MO_GOTNTPOFF: RefKind = MCSymbolRefExpr::VK_GOTNTPOFF; break;
  case X86II::MO_GOTPCREL:  RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
  case X86II::MO_GOT:       RefKind = MCSymbolRefExpr::VK_GOT; break;
  case X86II::MO_GOTOFF:    RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
  case X86II::MO_PLT:       RefKind = MCSymbolRefExpr::VK_PLT; break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(Expr,
                            MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx),
                                   Ctx);
    if (MO.isJTI()) {
      assert(MAI.doesSetDirectiveSuppressReloc());
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.createTempSymbol();
      AsmPrinter.OutStreamer->EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::create(Label, Ctx);
    }
    break;
  }

  if (!Expr)
    Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(Expr,
                                   MCConstantExpr::create(MO.getOffset(), Ctx),
                                   Ctx);
  return MCOperand::createExpr(Expr);
}

/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions
/// with a short fixed-register form.
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() &&
         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) && "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}
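// For example, an "addl $12345, %eax" arrives here as the tied three-operand
// form ADD32ri; rewriting it with Opcode = X86::ADD32i32 switches to the
// shorter eAX-specific encoding that carries only the immediate. The
// ri -> i opcode mappings live in X86MCInstLower::Lower below.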
/// \brief If a movsx instruction has a shorter encoding for the used register,
/// simplify the instruction to use it instead.
static void SimplifyMOVSX(MCInst &Inst) {
  unsigned NewOpcode = 0;
  unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction!");
  case X86::MOVSX16rr8:  // movsbw %al, %ax   --> cbtw
    if (Op0 == X86::AX && Op1 == X86::AL)
      NewOpcode = X86::CBW;
    break;
  case X86::MOVSX32rr16: // movswl %ax, %eax  --> cwtl
    if (Op0 == X86::EAX && Op1 == X86::AX)
      NewOpcode = X86::CWDE;
    break;
  case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
    if (Op0 == X86::RAX && Op1 == X86::EAX)
      NewOpcode = X86::CDQE;
    break;
  }

  if (NewOpcode != 0) {
    Inst = MCInst();
    Inst.setOpcode(NewOpcode);
  }
}

/// \brief Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
         Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
         Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
         (Inst.getOperand(AddrOp).isExpr() ||
          Inst.getOperand(AddrOp).isImm()) &&
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
       Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
       Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
  Inst.addOperand(Seg);
}

static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
  return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}

Optional<MCOperand>
X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
                                    const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    MI->dump();
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return None;
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
  case MachineOperand::MO_MCSymbol:
    return LowerSymbolOperand(MO, MO.getMCSymbol());
  case MachineOperand::MO_JumpTableIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
  case MachineOperand::MO_BlockAddress:
    return LowerSymbolOperand(
        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return None;
  }
}

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1 + X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of
  // VEX.B if one of the registers is extended, but the other isn't.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr;   break;
      case X86::VMOVAPDrr:        NewOpc = X86::VMOVAPDrr_REV;  break;
      case X86::VMOVAPDYrr:       NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr:        NewOpc = X86::VMOVAPSrr_REV;  break;
      case X86::VMOVAPSYrr:       NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr:        NewOpc = X86::VMOVDQArr_REV;  break;
      case X86::VMOVDQAYrr:       NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr:        NewOpc = X86::VMOVDQUrr_REV;  break;
      case X86::VMOVDQUYrr:       NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr:        NewOpc = X86::VMOVUPDrr_REV;  break;
      case X86::VMOVUPDYrr:       NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr:        NewOpc = X86::VMOVUPSrr_REV;  break;
      case X86::VMOVUPSYrr:       NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }
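  // The braced group below shares a single Opcode variable across several
  // case labels: each case selects the real jump opcode and then jumps to
  // the common rewrite at SetTailJmpOpcode, which rebuilds the instruction
  // with only the jump-target operand.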
  // TAILJMPd, TAILJMPd64, TAILJMPd_CC - Lower to the correct jump instruction.
  { unsigned Opcode;
  case X86::TAILJMPr:   Opcode = X86::JMP32r; goto SetTailJmpOpcode;
  case X86::TAILJMPd:
  case X86::TAILJMPd64: Opcode = X86::JMP_1;  goto SetTailJmpOpcode;
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    Opcode = X86::GetCondBranchFromCond(
        static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
    goto SetTailJmpOpcode;

  SetTailJmpOpcode:
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // These are pseudo-ops for OR to help with the OR->ADD transformation. We
  // do this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr);   goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr);   goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr);   goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri);   goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri);   goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8);  goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8);  goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8);  goto ReSimplify;

  // Atomic load and store require a separate pseudo-inst because Acquire
  // implies mayStore and Release implies mayLoad; fix these to regular MOV
  // instructions here.
  case X86::ACQUIRE_MOV8rm:    OutMI.setOpcode(X86::MOV8rm);    goto ReSimplify;
  case X86::ACQUIRE_MOV16rm:   OutMI.setOpcode(X86::MOV16rm);   goto ReSimplify;
  case X86::ACQUIRE_MOV32rm:   OutMI.setOpcode(X86::MOV32rm);   goto ReSimplify;
  case X86::ACQUIRE_MOV64rm:   OutMI.setOpcode(X86::MOV64rm);   goto ReSimplify;
  case X86::RELEASE_MOV8mr:    OutMI.setOpcode(X86::MOV8mr);    goto ReSimplify;
  case X86::RELEASE_MOV16mr:   OutMI.setOpcode(X86::MOV16mr);   goto ReSimplify;
  case X86::RELEASE_MOV32mr:   OutMI.setOpcode(X86::MOV32mr);   goto ReSimplify;
  case X86::RELEASE_MOV64mr:   OutMI.setOpcode(X86::MOV64mr);   goto ReSimplify;
  case X86::RELEASE_MOV8mi:    OutMI.setOpcode(X86::MOV8mi);    goto ReSimplify;
  case X86::RELEASE_MOV16mi:   OutMI.setOpcode(X86::MOV16mi);   goto ReSimplify;
  case X86::RELEASE_MOV32mi:   OutMI.setOpcode(X86::MOV32mi);   goto ReSimplify;
  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
  case X86::RELEASE_ADD8mi:    OutMI.setOpcode(X86::ADD8mi);    goto ReSimplify;
  case X86::RELEASE_ADD8mr:    OutMI.setOpcode(X86::ADD8mr);    goto ReSimplify;
  case X86::RELEASE_ADD32mi:   OutMI.setOpcode(X86::ADD32mi);   goto ReSimplify;
  case X86::RELEASE_ADD32mr:   OutMI.setOpcode(X86::ADD32mr);   goto ReSimplify;
  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
  case X86::RELEASE_ADD64mr:   OutMI.setOpcode(X86::ADD64mr);   goto ReSimplify;
  case X86::RELEASE_AND8mi:    OutMI.setOpcode(X86::AND8mi);    goto ReSimplify;
  case X86::RELEASE_AND8mr:    OutMI.setOpcode(X86::AND8mr);    goto ReSimplify;
  case X86::RELEASE_AND32mi:   OutMI.setOpcode(X86::AND32mi);   goto ReSimplify;
  case X86::RELEASE_AND32mr:   OutMI.setOpcode(X86::AND32mr);   goto ReSimplify;
  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
  case X86::RELEASE_AND64mr:   OutMI.setOpcode(X86::AND64mr);   goto ReSimplify;
  case X86::RELEASE_OR8mi:     OutMI.setOpcode(X86::OR8mi);     goto ReSimplify;
  case X86::RELEASE_OR8mr:     OutMI.setOpcode(X86::OR8mr);     goto ReSimplify;
  case X86::RELEASE_OR32mi:    OutMI.setOpcode(X86::OR32mi);    goto ReSimplify;
  case X86::RELEASE_OR32mr:    OutMI.setOpcode(X86::OR32mr);    goto ReSimplify;
  case X86::RELEASE_OR64mi32:  OutMI.setOpcode(X86::OR64mi32);  goto ReSimplify;
  case X86::RELEASE_OR64mr:    OutMI.setOpcode(X86::OR64mr);    goto ReSimplify;
  case X86::RELEASE_XOR8mi:    OutMI.setOpcode(X86::XOR8mi);    goto ReSimplify;
  case X86::RELEASE_XOR8mr:    OutMI.setOpcode(X86::XOR8mr);    goto ReSimplify;
  case X86::RELEASE_XOR32mi:   OutMI.setOpcode(X86::XOR32mi);   goto ReSimplify;
  case X86::RELEASE_XOR32mr:   OutMI.setOpcode(X86::XOR32mr);   goto ReSimplify;
  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
  case X86::RELEASE_XOR64mr:   OutMI.setOpcode(X86::XOR64mr);   goto ReSimplify;
  case X86::RELEASE_INC8m:     OutMI.setOpcode(X86::INC8m);     goto ReSimplify;
  case X86::RELEASE_INC16m:    OutMI.setOpcode(X86::INC16m);    goto ReSimplify;
  case X86::RELEASE_INC32m:    OutMI.setOpcode(X86::INC32m);    goto ReSimplify;
  case X86::RELEASE_INC64m:    OutMI.setOpcode(X86::INC64m);    goto ReSimplify;
  case X86::RELEASE_DEC8m:     OutMI.setOpcode(X86::DEC8m);     goto ReSimplify;
  case X86::RELEASE_DEC16m:    OutMI.setOpcode(X86::DEC16m);    goto ReSimplify;
  case X86::RELEASE_DEC32m:    OutMI.setOpcode(X86::DEC32m);    goto ReSimplify;
  case X86::RELEASE_DEC64m:    OutMI.setOpcode(X86::DEC64m);    goto ReSimplify;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:
  case X86::MOV16mr:
  case X86::MOV16rm:
  case X86::MOV32mr:
  case X86::MOV32rm: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::MOV8mr_NOREX:
    case X86::MOV8mr:  NewOpc = X86::MOV8o32a;  break;
    case X86::MOV8rm_NOREX:
    case X86::MOV8rm:  NewOpc = X86::MOV8ao32;  break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
    case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
    case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
    }
    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
    break;
  }

  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
  case X86::OR8ri:  case X86::OR16ri:  case X86::OR32ri:  case X86::OR64ri32:
  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::ADC8ri:     NewOpc = X86::ADC8i8;    break;
    case X86::ADC16ri:    NewOpc = X86::ADC16i16;  break;
    case X86::ADC32ri:    NewOpc = X86::ADC32i32;  break;
    case X86::ADC64ri32:  NewOpc = X86::ADC64i32;  break;
    case X86::ADD8ri:     NewOpc = X86::ADD8i8;    break;
    case X86::ADD16ri:    NewOpc = X86::ADD16i16;  break;
    case X86::ADD32ri:    NewOpc = X86::ADD32i32;  break;
    case X86::ADD64ri32:  NewOpc = X86::ADD64i32;  break;
    case X86::AND8ri:     NewOpc = X86::AND8i8;    break;
    case X86::AND16ri:    NewOpc = X86::AND16i16;  break;
    case X86::AND32ri:    NewOpc = X86::AND32i32;  break;
    case X86::AND64ri32:  NewOpc = X86::AND64i32;  break;
    case X86::CMP8ri:     NewOpc = X86::CMP8i8;    break;
    case X86::CMP16ri:    NewOpc = X86::CMP16i16;  break;
    case X86::CMP32ri:    NewOpc = X86::CMP32i32;  break;
    case X86::CMP64ri32:  NewOpc = X86::CMP64i32;  break;
    case X86::OR8ri:      NewOpc = X86::OR8i8;     break;
    case X86::OR16ri:     NewOpc = X86::OR16i16;   break;
    case X86::OR32ri:     NewOpc = X86::OR32i32;   break;
    case X86::OR64ri32:   NewOpc = X86::OR64i32;   break;
    case X86::SBB8ri:     NewOpc = X86::SBB8i8;    break;
    case X86::SBB16ri:    NewOpc = X86::SBB16i16;  break;
    case X86::SBB32ri:    NewOpc = X86::SBB32i32;  break;
    case X86::SBB64ri32:  NewOpc = X86::SBB64i32;  break;
    case X86::SUB8ri:     NewOpc = X86::SUB8i8;    break;
    case X86::SUB16ri:    NewOpc = X86::SUB16i16;  break;
    case X86::SUB32ri:    NewOpc = X86::SUB32i32;  break;
    case X86::SUB64ri32:  NewOpc = X86::SUB64i32;  break;
    case X86::TEST8ri:    NewOpc = X86::TEST8i8;   break;
    case X86::TEST16ri:   NewOpc = X86::TEST16i16; break;
    case X86::TEST32ri:   NewOpc = X86::TEST32i32; break;
    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
    case X86::XOR8ri:     NewOpc = X86::XOR8i8;    break;
    case X86::XOR16ri:    NewOpc = X86::XOR16i16;  break;
    case X86::XOR32ri:    NewOpc = X86::XOR32i32;  break;
    case X86::XOR64ri32:  NewOpc = X86::XOR64i32;  break;
    }
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}
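// Lower the TLS_addr/TLS_base_addr pseudos to the canonical TLS call
// sequences. For the 64-bit general-dynamic case (TLS_addr64), the emitted
// sequence is roughly (a sketch):
//
//   data16 leaq sym@TLSGD(%rip), %rdi
//   data16 data16 rex64 callq __tls_get_addr@PLT
//
// The extra prefixes pad the sequence out to the fixed length that linkers
// expect when relaxing the TLS access model.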
void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {
  bool is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;

  bool needsPadding = MI.getOpcode() == X86::TLS_addr64;

  MCContext &context = OutStreamer->getContext();

  if (needsPadding)
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  MCSymbol *sym = MCInstLowering.GetSymbolFromOperand(MI.getOperand(3));
  const MCSymbolRefExpr *symRef = MCSymbolRefExpr::create(sym, SRVK, context);

  MCInst LEA;
  if (is64Bits) {
    LEA.setOpcode(X86::LEA64r);
    LEA.addOperand(MCOperand::createReg(X86::RDI)); // dest
    LEA.addOperand(MCOperand::createReg(X86::RIP)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else if (SRVK == MCSymbolRefExpr::VK_TLSLDM) {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(0));        // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  } else {
    LEA.setOpcode(X86::LEA32r);
    LEA.addOperand(MCOperand::createReg(X86::EAX)); // dest
    LEA.addOperand(MCOperand::createReg(0));        // base
    LEA.addOperand(MCOperand::createImm(1));        // scale
    LEA.addOperand(MCOperand::createReg(X86::EBX)); // index
    LEA.addOperand(MCOperand::createExpr(symRef));  // disp
    LEA.addOperand(MCOperand::createReg(0));        // seg
  }
  EmitAndCountInstruction(LEA);

  if (needsPadding) {
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
  }

  StringRef name = is64Bits ? "__tls_get_addr" : "___tls_get_addr";
  MCSymbol *tlsGetAddr = context.getOrCreateSymbol(name);
  const MCSymbolRefExpr *tlsRef =
      MCSymbolRefExpr::create(tlsGetAddr, MCSymbolRefExpr::VK_PLT, context);

  EmitAndCountInstruction(MCInstBuilder(is64Bits ? X86::CALL64pcrel32
                                                 : X86::CALLpcrel32)
                              .addExpr(tlsRef));
}

/// \brief Emit the largest nop instruction smaller than or equal to \p NumBytes
/// bytes. Return the size of nop emitted.
static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                        const MCSubtargetInfo &STI) {
  // This works only for 64-bit. For 32-bit we would additionally have to
  // check whether the CPU supports multi-byte nops.
  assert(Is64Bit && "EmitNops only supports X86-64");

  unsigned NopSize;
  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  Opc = IndexReg = Displacement = SegmentReg = 0;
  BaseReg = X86::RAX;
  ScaleVal = 1;
  switch (NumBytes) {
  case 0: llvm_unreachable("Zero nops?"); break;
  case 1: NopSize = 1; Opc = X86::NOOP; break;
  case 2: NopSize = 2; Opc = X86::XCHG16ar; break;
  case 3: NopSize = 3; Opc = X86::NOOPL; break;
  case 4: NopSize = 4; Opc = X86::NOOPL; Displacement = 8; break;
  case 5: NopSize = 5; Opc = X86::NOOPL; Displacement = 8;
          IndexReg = X86::RAX; break;
  case 6: NopSize = 6; Opc = X86::NOOPW; Displacement = 8;
          IndexReg = X86::RAX; break;
  case 7: NopSize = 7; Opc = X86::NOOPL; Displacement = 512; break;
  case 8: NopSize = 8; Opc = X86::NOOPL; Displacement = 512;
          IndexReg = X86::RAX; break;
  case 9: NopSize = 9; Opc = X86::NOOPW; Displacement = 512;
          IndexReg = X86::RAX; break;
  default: NopSize = 10; Opc = X86::NOOPW; Displacement = 512;
           IndexReg = X86::RAX; SegmentReg = X86::CS; break;
  }

  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.EmitBytes("\x66");

  switch (Opc) {
  default:
    llvm_unreachable("Unexpected opcode");
    break;
  case X86::NOOP:
    OS.EmitInstruction(MCInstBuilder(Opc), STI);
    break;
  case X86::XCHG16ar:
    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
    break;
  case X86::NOOPL:
  case X86::NOOPW:
    OS.EmitInstruction(MCInstBuilder(Opc)
                           .addReg(BaseReg)
                           .addImm(ScaleVal)
                           .addReg(IndexReg)
                           .addImm(Displacement)
                           .addReg(SegmentReg),
                       STI);
    break;
  }
  assert(NopSize <= NumBytes && "We overemitted?");
  return NopSize;
}

/// \brief Emit the optimal amount of multi-byte nops on X86.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI) {
  unsigned NopsToEmit = NumBytes;
  (void)NopsToEmit;
  while (NumBytes) {
    NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI);
    assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
  }
}

void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower call target and choose correct opcode.
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address. You'll fail asserts during load & relocation if this
      // symbol is too far away.
      // (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate. You'll fail asserts during load & relocation if this
      // address is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit the call.
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT.
  SM.recordStatepoint(MI);
}

void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
                                          X86MCInstLower &MCIL) {
  // FAULTING_LOAD_OP <def>, <MBB handler>, <load opcode>, <load operands>

  unsigned LoadDefRegister = MI.getOperand(0).getReg();
  MCSymbol *HandlerLabel = MI.getOperand(1).getMBB()->getSymbol();
  unsigned LoadOpcode = MI.getOperand(2).getImm();
  unsigned LoadOperandsBeginIdx = 3;

  FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);

  MCInst LoadMI;
  LoadMI.setOpcode(LoadOpcode);

  if (LoadDefRegister != X86::NoRegister)
    LoadMI.addOperand(MCOperand::createReg(LoadDefRegister));

  for (auto I = MI.operands_begin() + LoadOperandsBeginIdx,
            E = MI.operands_end();
       I != E; ++I)
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, *I))
      LoadMI.addOperand(MaybeOperand.getValue());

  OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo());
}

void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize, opcode, operands

  unsigned MinSize = MI.getOperand(0).getImm();
  unsigned Opcode = MI.getOperand(1).getImm();

  MCInst MCI;
  MCI.setOpcode(Opcode);
  for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      MCI.addOperand(MaybeOperand.getValue());

  SmallString<256> Code;
  SmallVector<MCFixup, 4> Fixups;
  raw_svector_ostream VecOS(Code);
  CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());

  if (Code.size() < MinSize) {
    if (MinSize == 2 && Opcode == X86::PUSH64r) {
      // This is an optimization that lets us get away without emitting a nop
      // in many cases.
      //
      // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes
      // two bytes too, so the check on MinSize is important.
      MCI.setOpcode(X86::PUSH64rmr);
    } else {
      unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
                                 getSubtargetInfo());
      assert(NopSize == MinSize && "Could not implement MinSize!");
      (void)NopSize;
    }
  }

  OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
}
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
  SM.recordStackMap(MI);
  unsigned NumShadowBytes = MI.getOperand(1).getImm();
  SMShadowTracker.reset(NumShadowBytes);
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
                                           MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}
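// The LowerPATCHABLE_* routines below implement XRay instrumentation points.
// Each one emits a nop sled (for function entry and tail calls, preceded by a
// two-byte jmp over the sled) that the XRay runtime can later patch into a
// call to its tracing hooks; see the per-function comments for the exact
// layouts.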
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
                                                  X86MCInstLower &MCIL) {
  // We want to emit the following pattern:
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   jmp .tmpN
  //   # 9 bytes worth of noops
  // .tmpN
  //
  // We need the 9 bytes because at runtime, we'd be patching over the full 11
  // bytes with the following pattern:
  //
  //   mov %r10, <function id, 32-bit>   // 6 bytes
  //   call <relative offset, 32-bits>   // 5 bytes
  //
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBytes("\xeb\x09");
  EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
  OutStreamer->EmitLabel(Target);
  recordSled(CurSled, MI, SledKind::FUNCTION_ENTER);
}

void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
                                       X86MCInstLower &MCIL) {
  // Since PATCHABLE_RET takes the opcode of the return statement as an
  // argument, we use that to emit the correct form of the RET that we want.
  // i.e. when we see this:
  //
  //   PATCHABLE_RET X86::RET ...
  //
  // We should emit the RET followed by sleds.
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   ret  # or equivalent instruction
  //   # 10 bytes worth of noops
  //
  // This just makes sure that the alignment for the next instruction is 2.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  unsigned OpCode = MI.getOperand(0).getImm();
  MCInst Ret;
  Ret.setOpcode(OpCode);
  for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      Ret.addOperand(MaybeOperand.getValue());
  OutStreamer->EmitInstruction(Ret, getSubtargetInfo());
  EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo());
  recordSled(CurSled, MI, SledKind::FUNCTION_EXIT);
}

void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
                                             X86MCInstLower &MCIL) {
  // Like PATCHABLE_RET, we have the actual instruction in the operands to this
  // instruction so we lower that particular instruction and its operands.
  // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how
  // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to
  // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual
  // tail call much like how we have it in PATCHABLE_RET.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBytes("\xeb\x09");
  EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
  OutStreamer->EmitLabel(Target);
  recordSled(CurSled, MI, SledKind::TAIL_CALL);

  unsigned OpCode = MI.getOperand(0).getImm();
  MCInst TC;
  TC.setOpcode(OpCode);

  // Before emitting the instruction, add a comment to indicate that this is
  // indeed a tail call.
  OutStreamer->AddComment("TAILCALL");
  for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      TC.addOperand(MaybeOperand.getValue());
  OutStreamer->EmitInstruction(TC, getSubtargetInfo());
}

// Returns instruction preceding MBBI in MachineFunction.
// If MBBI is the first instruction of the first basic block, returns null.
static MachineBasicBlock::const_iterator
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
  const MachineBasicBlock *MBB = MBBI->getParent();
  while (MBBI == MBB->begin()) {
    if (MBB == &MBB->getParent()->front())
      return MachineBasicBlock::const_iterator();
    MBB = MBB->getPrevNode();
    MBBI = MBB->end();
  }
  return --MBBI;
}

static const Constant *getConstantFromPool(const MachineInstr &MI,
                                           const MachineOperand &Op) {
  if (!Op.isCPI())
    return nullptr;

  ArrayRef<MachineConstantPoolEntry> Constants =
      MI.getParent()->getParent()->getConstantPool()->getConstants();
  const MachineConstantPoolEntry &ConstantEntry = Constants[Op.getIndex()];

  // Bail if this is a machine constant pool entry; we won't be able to dig
  // out anything useful.
  if (ConstantEntry.isMachineConstantPoolEntry())
    return nullptr;

  auto *C = dyn_cast<Constant>(ConstantEntry.Val.ConstVal);
  assert((!C || ConstantEntry.getType() == C->getType()) &&
         "Expected a constant of the same type!");
  return C;
}
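// Render a human-readable description of a shuffle, e.g. (an illustrative
// example) "xmm0 = xmm1[0,1],zero,zero": spans of elements taken from one
// source register are grouped together, "zero" marks zeroed lanes, and "u"
// marks undef lanes.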
static std::string getShuffleComment(const MachineInstr *MI,
                                     unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx,
                                     ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows)
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so it's OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  const MachineOperand &DstOp = MI->getOperand(0);
  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);

  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span.
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName;

  // Handle AVX512 MASK/MASKZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  if (SrcOp1Idx > 1) {
    assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask");

    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1);
    if (WriteMaskOp.isReg()) {
      CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}";

      if (SrcOp1Idx == 2) {
        CS << " {z}";
      }
    }
  }

  CS << " = ";

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}

void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(*MF, *this);
  const X86RegisterInfo *RI =
      MF->getSubtarget<X86Subtarget>().getRegisterInfo();

  // Add a comment about EVEX-2-VEX compression for AVX-512 instrs that
  // are compressed from EVEX encoding to VEX encoding.
  if (TM.Options.MCOptions.ShowMCEncoding) {
    if (MI->getAsmPrinterFlags() & AC_EVEX_2_VEX)
      OutStreamer->AddComment("EVEX TO VEX Compression ", false);
  }

  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    llvm_unreachable("Should be handled target independently");

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    OutStreamer->emitRawComment("MEMBARRIER");
    return;

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    unsigned Reg = MI->getOperand(0).getReg();
    OutStreamer->AddComment(StringRef("eh_return, addr: %") +
                            X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::CLEANUPRET: {
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("CLEANUPRET");
    break;
  }

  case X86::CATCHRET: {
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("CATCHRET");
    break;
  }

  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::TAILJMPd:
  case X86::TAILJMPd_CC:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64:
  case X86::TAILJMPd64_CC:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPm64_REX:
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("TAILCALL");
    break;

  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return LowerTlsAddr(MCInstLowering, *MI);

  case X86::MOVPC32r: {
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //     call "L1$pb"
    // "L1$pb":
    //     popl %esi

    // Emit the call.
    MCSymbol *PICBase = MF->getPICBaseSymbol();
    // FIXME: We would like an efficient form for this, so we don't have to do
    // a lot of extra uniquing.
    EmitAndCountInstruction(
        MCInstBuilder(X86::CALLpcrel32)
            .addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));

    const X86FrameLowering *FrameLowering =
        MF->getSubtarget<X86Subtarget>().getFrameLowering();
    bool hasFP = FrameLowering->hasFP(*MF);

    // TODO: This is needed only if we require precise CFA.
    bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
                               !OutStreamer->getDwarfFrameInfos().back().End;

    int stackGrowth = -RI->getSlotSize();

    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth);
    }

    // Emit the label.
    OutStreamer->EmitLabel(PICBase);

    // popl $reg
    EmitAndCountInstruction(
        MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));

    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth);
    }
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //   EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and
    // refer to it.
    MCSymbol *DotSym = OutContext.createTempSymbol();
    OutStreamer->EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand
    // expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
    const MCExpr *PICBase =
        MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::createAdd(
        MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);

    EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
                                .addReg(MI->getOperand(0).getReg())
                                .addReg(MI->getOperand(1).getReg())
                                .addExpr(DotExpr));
    return;
  }
  case TargetOpcode::STATEPOINT:
    return LowerSTATEPOINT(*MI, MCInstLowering);

  case TargetOpcode::FAULTING_LOAD_OP:
    return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_OP:
    return LowerPATCHABLE_OP(*MI, MCInstLowering);

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_RET:
    return LowerPATCHABLE_RET(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_TAIL_CALL:
    return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);

  case X86::MORESTACK_RET:
    EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
    return;

  case X86::MORESTACK_RET_RESTORE_R10:
    // Return, then restore R10.
    EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
    return;

  case X86::SEH_PushReg:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFIPushReg(
        RI->getSEHRegNum(MI->getOperand(0).getImm()));
    return;

  case X86::SEH_SaveReg:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                   MI->getOperand(1).getImm());
    return;

  case X86::SEH_SaveXMM:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                   MI->getOperand(1).getImm());
    return;

  case X86::SEH_StackAlloc:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
    return;

  case X86::SEH_SetFrame:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFISetFrame(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                    MI->getOperand(1).getImm());
    return;

  case X86::SEH_PushFrame:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
    return;

  case X86::SEH_EndPrologue:
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    OutStreamer->EmitWinCFIEndProlog();
    return;

  case X86::SEH_Epilogue: {
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    MachineBasicBlock::const_iterator MBBI(MI);
    // Check if preceded by a call and emit nop if so.
    for (MBBI = PrevCrossBBInst(MBBI);
         MBBI != MachineBasicBlock::const_iterator();
         MBBI = PrevCrossBBInst(MBBI)) {
      // Conservatively assume that pseudo instructions don't emit code and
      // keep looking for a call. We may emit an unnecessary nop in some cases.
      if (!MBBI->isPseudo()) {
        if (MBBI->isCall())
          EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
        break;
      }
    }
    return;
  }

  // Lower PSHUFB and VPERMILP normally but add a comment if we can find
  // a constant shuffle mask. We won't be able to do this at the MC layer
  // because the mask isn't an immediate.
  case X86::PSHUFBrm:
  case X86::VPSHUFBrm:
  case X86::VPSHUFBYrm:
  case X86::VPSHUFBZ128rm:
  case X86::VPSHUFBZ128rmk:
  case X86::VPSHUFBZ128rmkz:
  case X86::VPSHUFBZ256rm:
  case X86::VPSHUFBZ256rmk:
  case X86::VPSHUFBZ256rmkz:
  case X86::VPSHUFBZrm:
  case X86::VPSHUFBZrmk:
  case X86::VPSHUFBZrmkz: {
    if (!OutStreamer->isVerboseAsm())
      break;
    unsigned SrcIdx, MaskIdx;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::PSHUFBrm:
    case X86::VPSHUFBrm:
    case X86::VPSHUFBYrm:
    case X86::VPSHUFBZ128rm:
    case X86::VPSHUFBZ256rm:
    case X86::VPSHUFBZrm:
      SrcIdx = 1; MaskIdx = 5; break;
    case X86::VPSHUFBZ128rmkz:
    case X86::VPSHUFBZ256rmkz:
    case X86::VPSHUFBZrmkz:
      SrcIdx = 2; MaskIdx = 6; break;
    case X86::VPSHUFBZ128rmk:
    case X86::VPSHUFBZ256rmk:
    case X86::VPSHUFBZrmk:
      SrcIdx = 3; MaskIdx = 7; break;
    }

    assert(MI->getNumOperands() >= 6 &&
           "We should always have at least 6 operands!");

    const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      SmallVector<int, 64> Mask;
      DecodePSHUFBMask(C, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
    }
    break;
  }

  case X86::VPERMILPSrm:
  case X86::VPERMILPSYrm:
  case X86::VPERMILPSZ128rm:
  case X86::VPERMILPSZ128rmk:
  case X86::VPERMILPSZ128rmkz:
  case X86::VPERMILPSZ256rm:
  case X86::VPERMILPSZ256rmk:
  case X86::VPERMILPSZ256rmkz:
  case X86::VPERMILPSZrm:
  case X86::VPERMILPSZrmk:
  case X86::VPERMILPSZrmkz:
  case X86::VPERMILPDrm:
  case X86::VPERMILPDYrm:
  case X86::VPERMILPDZ128rm:
  case X86::VPERMILPDZ128rmk:
  case X86::VPERMILPDZ128rmkz:
  case X86::VPERMILPDZ256rm:
  case X86::VPERMILPDZ256rmk:
  case X86::VPERMILPDZ256rmkz:
  case X86::VPERMILPDZrm:
  case X86::VPERMILPDZrmk:
  case X86::VPERMILPDZrmkz: {
    if (!OutStreamer->isVerboseAsm())
      break;
    unsigned SrcIdx, MaskIdx;
    unsigned ElSize;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VPERMILPSrm:
    case X86::VPERMILPSYrm:
    case X86::VPERMILPSZ128rm:
    case X86::VPERMILPSZ256rm:
    case X86::VPERMILPSZrm:
      SrcIdx = 1; MaskIdx = 5; ElSize = 32; break;
    case X86::VPERMILPSZ128rmkz:
    case X86::VPERMILPSZ256rmkz:
    case X86::VPERMILPSZrmkz:
      SrcIdx = 2; MaskIdx = 6; ElSize = 32; break;
    case X86::VPERMILPSZ128rmk:
    case X86::VPERMILPSZ256rmk:
    case X86::VPERMILPSZrmk:
      SrcIdx = 3; MaskIdx = 7; ElSize = 32; break;
    case X86::VPERMILPDrm:
    case X86::VPERMILPDYrm:
    case X86::VPERMILPDZ128rm:
    case X86::VPERMILPDZ256rm:
    case X86::VPERMILPDZrm:
      SrcIdx = 1; MaskIdx = 5; ElSize = 64; break;
    case X86::VPERMILPDZ128rmkz:
    case X86::VPERMILPDZ256rmkz:
    case X86::VPERMILPDZrmkz:
      SrcIdx = 2; MaskIdx = 6; ElSize = 64; break;
    case X86::VPERMILPDZ128rmk:
    case X86::VPERMILPDZ256rmk:
    case X86::VPERMILPDZrmk:
      SrcIdx = 3; MaskIdx = 7; ElSize = 64; break;
    }

    assert(MI->getNumOperands() >= 6 &&
           "We should always have at least 6 operands!");

    const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      SmallVector<int, 16> Mask;
      DecodeVPERMILPMask(C, ElSize, Mask);
  case X86::VPERMIL2PDrm:
  case X86::VPERMIL2PSrm:
  case X86::VPERMIL2PDrmY:
  case X86::VPERMIL2PSrmY: {
    if (!OutStreamer->isVerboseAsm())
      break;
    assert(MI->getNumOperands() >= 8 &&
           "We should always have at least 8 operands!");

    const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
    if (!CtrlOp.isImm())
      break;

    unsigned ElSize;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VPERMIL2PSrm: case X86::VPERMIL2PSrmY: ElSize = 32; break;
    case X86::VPERMIL2PDrm: case X86::VPERMIL2PDrmY: ElSize = 64; break;
    }

    const MachineOperand &MaskOp = MI->getOperand(6);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      SmallVector<int, 16> Mask;
      DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
    }
    break;
  }

  case X86::VPPERMrrm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    assert(MI->getNumOperands() >= 7 &&
           "We should always have at least 7 operands!");

    const MachineOperand &MaskOp = MI->getOperand(6);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      SmallVector<int, 16> Mask;
      DecodeVPPERMMask(C, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
    }
    break;
  }

#define MOV_CASE(Prefix, Suffix)        \
  case X86::Prefix##MOVAPD##Suffix##rm: \
  case X86::Prefix##MOVAPS##Suffix##rm: \
  case X86::Prefix##MOVUPD##Suffix##rm: \
  case X86::Prefix##MOVUPS##Suffix##rm: \
  case X86::Prefix##MOVDQA##Suffix##rm: \
  case X86::Prefix##MOVDQU##Suffix##rm:

#define MOV_AVX512_CASE(Suffix)         \
  case X86::VMOVDQA64##Suffix##rm:      \
  case X86::VMOVDQA32##Suffix##rm:      \
  case X86::VMOVDQU64##Suffix##rm:      \
  case X86::VMOVDQU32##Suffix##rm:      \
  case X86::VMOVDQU16##Suffix##rm:      \
  case X86::VMOVDQU8##Suffix##rm:       \
  case X86::VMOVAPS##Suffix##rm:        \
  case X86::VMOVAPD##Suffix##rm:        \
  case X86::VMOVUPS##Suffix##rm:        \
  case X86::VMOVUPD##Suffix##rm:

#define CASE_ALL_MOV_RM()        \
  MOV_CASE(, )   /* SSE */       \
  MOV_CASE(V, )  /* AVX-128 */   \
  MOV_CASE(V, Y) /* AVX-256 */   \
  MOV_AVX512_CASE(Z)             \
  MOV_AVX512_CASE(Z256)          \
  MOV_AVX512_CASE(Z128)
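  // For reference, a sketch of one expansion of the helpers above:
  // MOV_CASE(V, Y) should expand to the six AVX-256 load case labels
  //   case X86::VMOVAPDYrm: case X86::VMOVAPSYrm: case X86::VMOVUPDYrm:
  //   case X86::VMOVUPSYrm: case X86::VMOVDQAYrm: case X86::VMOVDQUYrm:
  // so CASE_ALL_MOV_RM() below covers every full-register vector load form
  // from SSE through AVX-512.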
  // For loads from a constant pool to a vector register, print the constant
  // loaded.
  CASE_ALL_MOV_RM()
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
        CS << "[";
        for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements;
             ++i) {
          if (i != 0)
            CS << ",";
          if (CDS->getElementType()->isIntegerTy())
            CS << CDS->getElementAsInteger(i);
          else if (CDS->getElementType()->isFloatTy())
            CS << CDS->getElementAsFloat(i);
          else if (CDS->getElementType()->isDoubleTy())
            CS << CDS->getElementAsDouble(i);
          else
            CS << "?";
        }
        CS << "]";
        OutStreamer->AddComment(CS.str());
      } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
        CS << "<";
        for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands;
             ++i) {
          if (i != 0)
            CS << ",";
          Constant *COp = CV->getOperand(i);
          if (isa<UndefValue>(COp)) {
            CS << "u";
          } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
            if (CI->getBitWidth() <= 64) {
              CS << CI->getZExtValue();
            } else {
              // Print a multi-word constant as (w0,w1,...). Use a distinct
              // induction variable so we don't shadow the element loop's 'i'.
              const auto &Val = CI->getValue();
              CS << "(";
              for (int w = 0, N = Val.getNumWords(); w < N; ++w) {
                if (w > 0)
                  CS << ",";
                CS << Val.getRawData()[w];
              }
              CS << ")";
            }
          } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
            SmallString<32> Str;
            CF->getValueAPF().toString(Str);
            CS << Str;
          } else {
            CS << "?";
          }
        }
        CS << ">";
        OutStreamer->AddComment(CS.str());
      }
    }
    break;
  }

  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);

  // Stackmap shadows cannot include branch targets, so we can count the bytes
  // in a call towards the shadow, but we must ensure that no thread returns
  // into the stackmap shadow. The only way to achieve this is if the call is
  // at the end of the shadow.
  if (MI->isCall()) {
    // Count the size of the call towards the shadow.
    SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get());
    // Then flush the shadow so that we fill with nops before the call, not
    // after it.
    SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
    // Then emit the call.
    OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo());
    return;
  }

  EmitAndCountInstruction(TmpInst);
}
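// An illustrative walk-through of the call handling above (the byte counts
// are hypothetical): suppose a stackmap requested a 12-byte shadow, 6 bytes
// of instructions have been counted since it, and the call encodes to 2
// bytes. count() brings the shadow to 8 bytes, emitShadowPadding() then
// emits 12 - 8 = 4 bytes of nops, and the call is emitted last, so the
// shadow ends exactly at the end of the call and no return address can land
// inside it. If the counted bytes plus the call already reach 12, the shadow
// is satisfied and no nops are emitted at all.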