//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"

// Configure how generic SelectionDAG operations are legalized for
// WebAssembly: which types live in registers, which ISD nodes are Legal,
// Expand, Promote, or need Custom lowering (handled in LowerOperation below).
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Pointer width follows the wasm32/wasm64 subtarget choice.
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // WebAssembly does not produce floating-point exceptions on normal floating
  // point operations.
  setHasFloatingPointExceptions(false);
  // We don't know the microarchitecture here, so just reduce register
  // pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  // All 128-bit vector shapes share the single V128 register class.
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Addresses and indirect branches get custom lowering (see the
  // LowerGlobalAddress/LowerExternalSymbol/LowerJumpTable hooks; BlockAddress
  // and BRIND are diagnosed as unsupported in LowerOperation).
  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op : {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOWI, ISD::FPOW,
                    ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minnan and maxnan, which otherwise default to expand.
    setOperationAction(ISD::FMINNAN, T, Legal);
    setOperationAction(ISD::FMAXNAN, T, Legal);
  }

  for (auto T : {MVT::i32, MVT::i64}) {
    // Expand unavailable integer operations.
    for (auto Op :
         {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
          ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS,
          ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC,
          ISD::SUBE}) {
      setOperationAction(Op, T, Expand);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  for (auto T : {MVT::i1, MVT::i8, MVT::i16, MVT::i32})
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  // FrameIndex is custom-lowered to a TargetFrameIndex, and CopyToReg is
  // custom-lowered so a FrameIndex source can be routed through a
  // copy_local (see LowerCopyToReg).
  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  // - Floating-point extending loads.
  // - Floating-point truncating stores.
  // - i1 extending loads.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);

  // Trap lowers to wasm unreachable.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
}

// Create a FastISel instance for this target, delegating to the
// WebAssembly-specific factory.
FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}

bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode * /*GA*/) const {
  // All offsets can be folded.
  return true;
}

// Return the integer MVT to use for the shift-amount operand of a scalar
// shift of type VT: VT's own width rounded up to a power of two (minimum i8),
// except that shifts wider than 64 bits use an i32 count because they lower
// to compiler-rt libcalls.
MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8) BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}

// Map a WebAssemblyISD opcode to its debug name, using the table in
// WebAssemblyISD.def. Returns null for opcodes outside that range.
const char *WebAssemblyTargetLowering::getTargetNodeName(
    unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE) \
  case WebAssemblyISD::NODE:  \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

// Resolve the inline-asm 'r' constraint to a WebAssembly register class
// based on the value type; everything else falls back to the generic
// TargetLowering handling.
std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        // Narrow integers are handled in the i32 class; i64 gets its own.
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

// Decide whether an addressing mode (base + offset, no scaled index) is
// legal for WebAssembly loads/stores.
bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty,
                                                      unsigned AS) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0) return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0) return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast) *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

// Report a codegen diagnostic for an unsupported construct via the
// LLVMContext's diagnostic handler. Lowering continues after the report,
// so callers typically fall through with a best-effort result.
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(*MF.getFunction(), msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool CallingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties
  // like "cold", and we don't have any call-clobbered registers, so these are
  // mostly all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS;
}

// Lower an outgoing call to a WebAssemblyISD::CALL0/CALL1 node: fixed
// arguments become direct operands, byval arguments are copied to a fresh
// stack object, and (for varargs calls) non-fixed arguments are stored into
// a stack buffer whose address is passed as a trailing operand.
SDValue WebAssemblyTargetLowering::LowerCall(
    CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  // WebAssembly doesn't currently support explicit tail calls. If they are
  // required, fail. Otherwise, just disable them.
  if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
       MF.getTarget().Options.GuaranteedTailCallOpt) ||
      (CLI.CS && CLI.CS->isMustTailCall()))
    fail(DL, DAG, "WebAssembly doesn't support tail call yet");
  CLI.IsTailCall = false;

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  // Check each outgoing argument for attributes we can't handle, and copy
  // byval arguments into caller-owned stack objects so the callee sees its
  // own copy.
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  for (unsigned i = 0; i < Outs.size(); ++i) {
    const ISD::OutputArg &Out = Outs[i];
    SDValue &OutVal = OutVals[i];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      // Materialize a stack copy of the byval argument and pass the copy's
      // frame index instead of the original pointer.
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
  }

  bool IsVarArg = CLI.IsVarArg;
  unsigned NumFixedArgs = CLI.NumFixedArgs;

  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Layout.getABITypeAlignment(Ty));
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(DAG.getStore(
          Chain, DL, Arg, Add,
          MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    // Tie the independent stores together with a TokenFactor.
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    // Varargs call with an empty buffer: pass a null buffer pointer.
    FINode = DAG.getIntPtrConstant(0, DL);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg) Ops.push_back(FINode);

  // Build the result-type list: one entry per returned value, plus the chain.
  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }
  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  // CALL0 returns no values (chain only); CALL1 returns one value plus chain.
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}

bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

// Lower a function return into a WebAssemblyISD::RETURN node carrying the
// (at most one) return value.
SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!CallingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

// Lower incoming formal arguments: each used argument becomes a
// WebAssemblyISD::ARGUMENT node indexed by position, and a varargs function
// receives one extra trailing argument holding the vararg buffer pointer.
// Argument types are also recorded in the WebAssemblyFunctionInfo.
SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  if (!CallingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  // Set up the incoming ARGUMENTS value, which serves to represent the
  // liveness of the incoming values before they're represented by virtual
  // registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers. Unused arguments become UNDEF rather than ARGUMENT nodes.
    InVals.push_back(
        In.Used
            ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                          DAG.getTargetConstant(InVals.size(), DL, MVT::i32))
            : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    unsigned VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

// Dispatch for operations marked Custom in the constructor. Operations we
// declared Custom but don't actually support (computed gotos, return
// address) are diagnosed here.
SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR: // Probably nothing meaningful can be returned here.
    fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
    return SDValue();
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  }
}

// Rewrite a CopyToReg whose source is a FrameIndex so the FI is first
// materialized into a virtual register via a copy_local.
SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // copy_local between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(
        DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_LOCAL_I32
                                          : WebAssembly::COPY_LOCAL_I64,
                           DL, VT, Src),
        0);
    // Preserve the optional glue operand (4th operand) if present.
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy, Op.getNumOperands() == 4
                                                            ? Op.getOperand(3)
                                                            : SDValue());
  }
  // Anything else keeps the default lowering.
  return SDValue();
}

// Convert a FrameIndex node into a TargetFrameIndex so isel can match it.
SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  unsigned FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

// Wrap a global address in a WebAssemblyISD::Wrapper around its
// TargetGlobalAddress form.
SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");
  return DAG.getNode(
      WebAssemblyISD::Wrapper, DL, VT,
      DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
}

SDValue WebAssemblyTargetLowering::LowerExternalSymbol(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  // Set the TargetFlags to 0x1 which indicates that this is a "function"
  // symbol rather than a data symbol. We do this unconditionally even though
  // we don't know anything about the symbol other than its name, because all
  // external symbols used in target-independent SelectionDAG code are for
  // functions.
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT,
                                                 /*TargetFlags=*/0x1));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}

// Lower a BR_JT into a WebAssemblyISD::BR_TABLE node whose operands are the
// chain, the index, one basic block per jump-table entry, and a default
// destination.
SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs) Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for a default case. We
  // really want to sniff out the guard and put in the real default case (and
  // delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}

// Lower va_start: store the vararg buffer pointer (recorded in
// LowerFormalArguments) into the va_list object.
SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

//===----------------------------------------------------------------------===//
// WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//