//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
               ParmContext PC)
        : CCState(CC, isVarArg, MF, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
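// Under both APCS and AAPCS, r0-r3 carry the first four words of integer
// arguments; anything beyond that is passed on the stack.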
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
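    // The *vfp libcall variants below are implemented with ARM/VFP
    // instructions, which is why they are only registered when ARM ops and
    // VFP2 are available and we are not forced to soft float.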
    if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
        { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
        { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }

    // Set the correct calling convention for ARMv7k WatchOS. It's just
    // AAPCS_VFP for functions as simple as libcalls.
    if (Subtarget->isTargetWatchABI()) {
      for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
        setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetWatchOS() ||
      (Subtarget->isTargetIOS() &&
       !Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float, but are
  // needed for some targets which use a hard-float calling convention by
  // default.
  if (Subtarget->isAAPCS_ABI()) {
    setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
  } else {
    setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
    setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
    setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    setLibcallName(RTLIB::FPROUND_F32_F16, "__aeabi_f2h");
    setLibcallName(RTLIB::FPROUND_F64_F16, "__aeabi_d2h");
    setLibcallName(RTLIB::FPEXT_F16_F32, "__aeabi_h2f");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
  }

  for (MVT VT : MVT::vector_valuetypes()) {
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP supports any arithmetic operations on it.
    // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are
    // natively supported for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
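    // (A vector multiply whose operands are sign- or zero-extended from a
    // narrower vector can be selected as a single vmull.)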
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have a single-instruction CTPOP for vectors with element
    // types wider than 8 bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Expand);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Expand);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have a single-instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);

  if (Subtarget->isFPOnlySP()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
  }

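  // Now that all of the register classes have been added, compute the derived
  // properties (legal value types, promotions, etc.) that the lowering code
  // relies on.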
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
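  // (BSWAP is selected to the REV instruction, which was introduced in ARMv6;
  // older cores expand it.)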
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the CPU doesn't have a HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, LibCall);
    setOperationAction(ISD::UDIV, MVT::i32, LibCall);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);

    setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");

    setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  InsertFencesForAtomic = false;
  if (Subtarget->hasAnyDataBarrier() &&
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
      setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      InsertFencesForAtomic = true;
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (Subtarget->useSjLjEH())
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // Thumb-1 cannot currently select ARMISD::SUBE.
  if (!Subtarget->isThumb1Only())
    setOperationAction(ISD::SETCCE, MVT::i32, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
    // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
    if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    }

    // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    }
  }

  // Combine sin / cos into one node or libcall if possible.
  if (Subtarget->hasSinCos()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    if (Subtarget->isTargetWatchABI()) {
      setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
      setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
    }
    if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) {
      // For iOS, we don't want the normal expansion of a libcall to sincos;
      // we want to issue a libcall to __sincos_stret.
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
    }
  }

  // FP-ARMv8 implements a lot of rounding-like FP operations.
  if (Subtarget->hasFPARMv8()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    if (!Subtarget->isFPOnlySP()) {
      setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    }
  }

  if (Subtarget->hasNEON()) {
    // vmin and vmax aren't available in a scalar form, so we use
    // a NEON instruction with an undef lane instead.
    setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();

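  // The minimum function alignment is given in log2(bytes): Thumb code only
  // needs 2-byte alignment, while ARM code needs 4 bytes.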
  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as the representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  case ARMISD::FIRST_NUMBER: break;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
"ARMISD::VSHRu"; 1182 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 1183 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 1184 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 1185 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 1186 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 1187 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 1188 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 1189 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 1190 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 1191 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 1192 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 1193 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 1194 case ARMISD::VSLI: return "ARMISD::VSLI"; 1195 case ARMISD::VSRI: return "ARMISD::VSRI"; 1196 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 1197 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 1198 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 1199 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 1200 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; 1201 case ARMISD::VDUP: return "ARMISD::VDUP"; 1202 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 1203 case ARMISD::VEXT: return "ARMISD::VEXT"; 1204 case ARMISD::VREV64: return "ARMISD::VREV64"; 1205 case ARMISD::VREV32: return "ARMISD::VREV32"; 1206 case ARMISD::VREV16: return "ARMISD::VREV16"; 1207 case ARMISD::VZIP: return "ARMISD::VZIP"; 1208 case ARMISD::VUZP: return "ARMISD::VUZP"; 1209 case ARMISD::VTRN: return "ARMISD::VTRN"; 1210 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 1211 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 1212 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 1213 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 1214 case ARMISD::UMLAL: return "ARMISD::UMLAL"; 1215 case ARMISD::SMLAL: return "ARMISD::SMLAL"; 1216 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 1217 case ARMISD::BFI: return "ARMISD::BFI"; 1218 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 1219 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 1220 case ARMISD::VBSL: return "ARMISD::VBSL"; 1221 case ARMISD::MEMCPY: return "ARMISD::MEMCPY"; 1222 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 1223 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 1224 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 1225 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 1226 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 1227 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 1228 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 1229 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 1230 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 1231 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 1232 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 1233 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 1234 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 1235 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 1236 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 1237 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 1238 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 1239 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 1240 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 1241 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 1242 } 1243 return nullptr; 1244 } 1245 1246 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, 1247 EVT VT) const { 1248 if (!VT.isVector()) 1249 return getPointerTy(DL); 1250 return VT.changeVectorElementTypeToInteger(); 1251 } 1252 1253 /// getRegClassFor - Return the register class that should be used for 
the 1254 /// specified value type.
1255 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
1256 // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1257 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1258 // load / store 4 to 8 consecutive D registers.
1259 if (Subtarget->hasNEON()) {
1260 if (VT == MVT::v4i64)
1261 return &ARM::QQPRRegClass;
1262 if (VT == MVT::v8i64)
1263 return &ARM::QQQQPRRegClass;
1264 }
1265 return TargetLowering::getRegClassFor(VT);
1266 }
1267
1268 // memcpy, and other memory intrinsics, typically try to use LDM/STM if the
1269 // source/dest is aligned and the copy size is large enough. We therefore want
1270 // to align such objects passed to memory intrinsics.
1271 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1272 unsigned &PrefAlign) const {
1273 if (!isa<MemIntrinsic>(CI))
1274 return false;
1275 MinSize = 8;
1276 // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1277 // cycle faster than 4-byte aligned LDM.
1278 PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1279 return true;
1280 }
1281
1282 // Create a fast isel object.
1283 FastISel *
1284 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1285 const TargetLibraryInfo *libInfo) const {
1286 return ARM::createFastISel(funcInfo, libInfo);
1287 }
1288
1289 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1290 unsigned NumVals = N->getNumValues();
1291 if (!NumVals)
1292 return Sched::RegPressure;
1293
1294 for (unsigned i = 0; i != NumVals; ++i) {
1295 EVT VT = N->getValueType(i);
1296 if (VT == MVT::Glue || VT == MVT::Other)
1297 continue;
1298 if (VT.isFloatingPoint() || VT.isVector())
1299 return Sched::ILP;
1300 }
1301
1302 if (!N->isMachineOpcode())
1303 return Sched::RegPressure;
1304
1305 // Loads are scheduled for latency even if the instruction itinerary
1306 // is not available.
1307 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1308 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1309
1310 if (MCID.getNumDefs() == 0)
1311 return Sched::RegPressure;
1312 if (!Itins->isEmpty() &&
1313 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1314 return Sched::ILP;
1315
1316 return Sched::RegPressure;
1317 }
1318
1319 //===----------------------------------------------------------------------===//
1320 // Lowering Code
1321 //===----------------------------------------------------------------------===//
1322
1323 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
1324 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1325 switch (CC) {
1326 default: llvm_unreachable("Unknown condition code!");
1327 case ISD::SETNE: return ARMCC::NE;
1328 case ISD::SETEQ: return ARMCC::EQ;
1329 case ISD::SETGT: return ARMCC::GT;
1330 case ISD::SETGE: return ARMCC::GE;
1331 case ISD::SETLT: return ARMCC::LT;
1332 case ISD::SETLE: return ARMCC::LE;
1333 case ISD::SETUGT: return ARMCC::HI;
1334 case ISD::SETUGE: return ARMCC::HS;
1335 case ISD::SETULT: return ARMCC::LO;
1336 case ISD::SETULE: return ARMCC::LS;
1337 }
1338 }
1339
1340 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
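/// Some FP condition codes have no single ARM equivalent and need a second
/// condition (CondCode2); illustrative examples drawn from the switch below:
///   SETONE -> MI, with GT as the second condition
///   SETUEQ -> EQ, with VS as the second condition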
1341 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 1342 ARMCC::CondCodes &CondCode2) { 1343 CondCode2 = ARMCC::AL; 1344 switch (CC) { 1345 default: llvm_unreachable("Unknown FP condition!"); 1346 case ISD::SETEQ: 1347 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 1348 case ISD::SETGT: 1349 case ISD::SETOGT: CondCode = ARMCC::GT; break; 1350 case ISD::SETGE: 1351 case ISD::SETOGE: CondCode = ARMCC::GE; break; 1352 case ISD::SETOLT: CondCode = ARMCC::MI; break; 1353 case ISD::SETOLE: CondCode = ARMCC::LS; break; 1354 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 1355 case ISD::SETO: CondCode = ARMCC::VC; break; 1356 case ISD::SETUO: CondCode = ARMCC::VS; break; 1357 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 1358 case ISD::SETUGT: CondCode = ARMCC::HI; break; 1359 case ISD::SETUGE: CondCode = ARMCC::PL; break; 1360 case ISD::SETLT: 1361 case ISD::SETULT: CondCode = ARMCC::LT; break; 1362 case ISD::SETLE: 1363 case ISD::SETULE: CondCode = ARMCC::LE; break; 1364 case ISD::SETNE: 1365 case ISD::SETUNE: CondCode = ARMCC::NE; break; 1366 } 1367 } 1368 1369 //===----------------------------------------------------------------------===// 1370 // Calling Convention Implementation 1371 //===----------------------------------------------------------------------===// 1372 1373 #include "ARMGenCallingConv.inc" 1374 1375 /// getEffectiveCallingConv - Get the effective calling convention, taking into 1376 /// account presence of floating point hardware and calling convention 1377 /// limitations, such as support for variadic functions. 1378 CallingConv::ID 1379 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, 1380 bool isVarArg) const { 1381 switch (CC) { 1382 default: 1383 llvm_unreachable("Unsupported calling convention"); 1384 case CallingConv::ARM_AAPCS: 1385 case CallingConv::ARM_APCS: 1386 case CallingConv::GHC: 1387 return CC; 1388 case CallingConv::PreserveMost: 1389 return CallingConv::PreserveMost; 1390 case CallingConv::ARM_AAPCS_VFP: 1391 case CallingConv::Swift: 1392 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; 1393 case CallingConv::C: 1394 if (!Subtarget->isAAPCS_ABI()) 1395 return CallingConv::ARM_APCS; 1396 else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && 1397 getTargetMachine().Options.FloatABIType == FloatABI::Hard && 1398 !isVarArg) 1399 return CallingConv::ARM_AAPCS_VFP; 1400 else 1401 return CallingConv::ARM_AAPCS; 1402 case CallingConv::Fast: 1403 case CallingConv::CXX_FAST_TLS: 1404 if (!Subtarget->isAAPCS_ABI()) { 1405 if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) 1406 return CallingConv::Fast; 1407 return CallingConv::ARM_APCS; 1408 } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) 1409 return CallingConv::ARM_AAPCS_VFP; 1410 else 1411 return CallingConv::ARM_AAPCS; 1412 } 1413 } 1414 1415 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given 1416 /// CallingConvention. 1417 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, 1418 bool Return, 1419 bool isVarArg) const { 1420 switch (getEffectiveCallingConv(CC, isVarArg)) { 1421 default: 1422 llvm_unreachable("Unsupported calling convention"); 1423 case CallingConv::ARM_APCS: 1424 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1425 case CallingConv::ARM_AAPCS: 1426 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1427 case CallingConv::ARM_AAPCS_VFP: 1428 return (Return ? 
RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1429 case CallingConv::Fast: 1430 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1431 case CallingConv::GHC: 1432 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); 1433 case CallingConv::PreserveMost: 1434 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1435 } 1436 } 1437 1438 /// LowerCallResult - Lower the result values of a call into the 1439 /// appropriate copies out of appropriate physical registers. 1440 SDValue 1441 ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1442 CallingConv::ID CallConv, bool isVarArg, 1443 const SmallVectorImpl<ISD::InputArg> &Ins, 1444 SDLoc dl, SelectionDAG &DAG, 1445 SmallVectorImpl<SDValue> &InVals, 1446 bool isThisReturn, SDValue ThisVal) const { 1447 1448 // Assign locations to each value returned by this call. 1449 SmallVector<CCValAssign, 16> RVLocs; 1450 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 1451 *DAG.getContext(), Call); 1452 CCInfo.AnalyzeCallResult(Ins, 1453 CCAssignFnForNode(CallConv, /* Return*/ true, 1454 isVarArg)); 1455 1456 // Copy all of the result registers out of their specified physreg. 1457 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1458 CCValAssign VA = RVLocs[i]; 1459 1460 // Pass 'this' value directly from the argument to return value, to avoid 1461 // reg unit interference 1462 if (i == 0 && isThisReturn) { 1463 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && 1464 "unexpected return calling convention register assignment"); 1465 InVals.push_back(ThisVal); 1466 continue; 1467 } 1468 1469 SDValue Val; 1470 if (VA.needsCustom()) { 1471 // Handle f64 or half of a v2f64. 1472 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1473 InFlag); 1474 Chain = Lo.getValue(1); 1475 InFlag = Lo.getValue(2); 1476 VA = RVLocs[++i]; // skip ahead to next loc 1477 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1478 InFlag); 1479 Chain = Hi.getValue(1); 1480 InFlag = Hi.getValue(2); 1481 if (!Subtarget->isLittle()) 1482 std::swap (Lo, Hi); 1483 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1484 1485 if (VA.getLocVT() == MVT::v2f64) { 1486 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 1487 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1488 DAG.getConstant(0, dl, MVT::i32)); 1489 1490 VA = RVLocs[++i]; // skip ahead to next loc 1491 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1492 Chain = Lo.getValue(1); 1493 InFlag = Lo.getValue(2); 1494 VA = RVLocs[++i]; // skip ahead to next loc 1495 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1496 Chain = Hi.getValue(1); 1497 InFlag = Hi.getValue(2); 1498 if (!Subtarget->isLittle()) 1499 std::swap (Lo, Hi); 1500 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1501 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1502 DAG.getConstant(1, dl, MVT::i32)); 1503 } 1504 } else { 1505 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1506 InFlag); 1507 Chain = Val.getValue(1); 1508 InFlag = Val.getValue(2); 1509 } 1510 1511 switch (VA.getLocInfo()) { 1512 default: llvm_unreachable("Unknown loc info!"); 1513 case CCValAssign::Full: break; 1514 case CCValAssign::BCvt: 1515 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1516 break; 1517 } 1518 1519 InVals.push_back(Val); 1520 } 1521 1522 return Chain; 1523 } 1524 1525 /// LowerMemOpCallTo - Store the argument to the stack. 
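/// A minimal sketch of the computation below: the destination address is the
/// stack pointer plus the location's memory offset, so an argument assigned
/// offset 8 ends up stored at [sp, #8] of the outgoing frame (illustrative).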
1526 SDValue
1527 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
1528 SDValue StackPtr, SDValue Arg,
1529 SDLoc dl, SelectionDAG &DAG,
1530 const CCValAssign &VA,
1531 ISD::ArgFlagsTy Flags) const {
1532 unsigned LocMemOffset = VA.getLocMemOffset();
1533 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1534 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
1535 StackPtr, PtrOff);
1536 return DAG.getStore(
1537 Chain, dl, Arg, PtrOff,
1538 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
1539 false, false, 0);
1540 }
1541
1542 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
1543 SDValue Chain, SDValue &Arg,
1544 RegsToPassVector &RegsToPass,
1545 CCValAssign &VA, CCValAssign &NextVA,
1546 SDValue &StackPtr,
1547 SmallVectorImpl<SDValue> &MemOpChains,
1548 ISD::ArgFlagsTy Flags) const {
1549
1550 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1551 DAG.getVTList(MVT::i32, MVT::i32), Arg);
1552 unsigned id = Subtarget->isLittle() ? 0 : 1;
1553 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
1554
1555 if (NextVA.isRegLoc())
1556 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
1557 else {
1558 assert(NextVA.isMemLoc());
1559 if (!StackPtr.getNode())
1560 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
1561 getPointerTy(DAG.getDataLayout()));
1562
1563 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
1564 dl, DAG, NextVA,
1565 Flags));
1566 }
1567 }
1568
1569 /// LowerCall - Lowering a call into a callseq_start <-
1570 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
1571 /// nodes.
1572 SDValue
1573 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1574 SmallVectorImpl<SDValue> &InVals) const {
1575 SelectionDAG &DAG = CLI.DAG;
1576 SDLoc &dl = CLI.DL;
1577 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1578 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1579 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1580 SDValue Chain = CLI.Chain;
1581 SDValue Callee = CLI.Callee;
1582 bool &isTailCall = CLI.IsTailCall;
1583 CallingConv::ID CallConv = CLI.CallConv;
1584 bool doesNotRet = CLI.DoesNotReturn;
1585 bool isVarArg = CLI.IsVarArg;
1586
1587 MachineFunction &MF = DAG.getMachineFunction();
1588 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1589 bool isThisReturn = false;
1590 bool isSibCall = false;
1591 auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
1592
1593 // Disable tail calls if they're not supported.
1594 if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
1595 isTailCall = false;
1596
1597 if (isTailCall) {
1598 // Check if it's really possible to do a tail call.
1599 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1600 isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
1601 Outs, OutVals, Ins, DAG);
1602 if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
1603 report_fatal_error("failed to perform tail call elimination on a call "
1604 "site marked musttail");
1605 // We don't support GuaranteedTailCallOpt for ARM, only automatically
1606 // detected sibcalls.
1607 if (isTailCall) {
1608 ++NumTailCalls;
1609 isSibCall = true;
1610 }
1611 }
1612
1613 // Analyze operands of the call, assigning locations to each operand.
1614 SmallVector<CCValAssign, 16> ArgLocs; 1615 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 1616 *DAG.getContext(), Call); 1617 CCInfo.AnalyzeCallOperands(Outs, 1618 CCAssignFnForNode(CallConv, /* Return*/ false, 1619 isVarArg)); 1620 1621 // Get a count of how many bytes are to be pushed on the stack. 1622 unsigned NumBytes = CCInfo.getNextStackOffset(); 1623 1624 // For tail calls, memory operands are available in our caller's stack. 1625 if (isSibCall) 1626 NumBytes = 0; 1627 1628 // Adjust the stack pointer for the new arguments... 1629 // These operations are automatically eliminated by the prolog/epilog pass 1630 if (!isSibCall) 1631 Chain = DAG.getCALLSEQ_START(Chain, 1632 DAG.getIntPtrConstant(NumBytes, dl, true), dl); 1633 1634 SDValue StackPtr = 1635 DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout())); 1636 1637 RegsToPassVector RegsToPass; 1638 SmallVector<SDValue, 8> MemOpChains; 1639 1640 // Walk the register/memloc assignments, inserting copies/loads. In the case 1641 // of tail call optimization, arguments are handled later. 1642 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1643 i != e; 1644 ++i, ++realArgIdx) { 1645 CCValAssign &VA = ArgLocs[i]; 1646 SDValue Arg = OutVals[realArgIdx]; 1647 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1648 bool isByVal = Flags.isByVal(); 1649 1650 // Promote the value if needed. 1651 switch (VA.getLocInfo()) { 1652 default: llvm_unreachable("Unknown loc info!"); 1653 case CCValAssign::Full: break; 1654 case CCValAssign::SExt: 1655 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1656 break; 1657 case CCValAssign::ZExt: 1658 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1659 break; 1660 case CCValAssign::AExt: 1661 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1662 break; 1663 case CCValAssign::BCvt: 1664 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1665 break; 1666 } 1667 1668 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1669 if (VA.needsCustom()) { 1670 if (VA.getLocVT() == MVT::v2f64) { 1671 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1672 DAG.getConstant(0, dl, MVT::i32)); 1673 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1674 DAG.getConstant(1, dl, MVT::i32)); 1675 1676 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1677 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1678 1679 VA = ArgLocs[++i]; // skip ahead to next loc 1680 if (VA.isRegLoc()) { 1681 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1682 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1683 } else { 1684 assert(VA.isMemLoc()); 1685 1686 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1687 dl, DAG, VA, Flags)); 1688 } 1689 } else { 1690 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1691 StackPtr, MemOpChains, Flags); 1692 } 1693 } else if (VA.isRegLoc()) { 1694 if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) { 1695 assert(VA.getLocVT() == MVT::i32 && 1696 "unexpected calling convention register assignment"); 1697 assert(!Ins.empty() && Ins[0].VT == MVT::i32 && 1698 "unexpected use of 'returned'"); 1699 isThisReturn = true; 1700 } 1701 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1702 } else if (isByVal) { 1703 assert(VA.isMemLoc()); 1704 unsigned offset = 0; 1705 1706 // True if this byval aggregate will be split between registers 1707 // and memory. 
1708 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1709 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1710
1711 if (CurByValIdx < ByValArgsCount) {
1712
1713 unsigned RegBegin, RegEnd;
1714 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1715
1716 EVT PtrVT =
1717 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1718 unsigned int i, j;
1719 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1720 SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
1721 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1722 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1723 MachinePointerInfo(),
1724 false, false, false,
1725 DAG.InferPtrAlignment(AddArg));
1726 MemOpChains.push_back(Load.getValue(1));
1727 RegsToPass.push_back(std::make_pair(j, Load));
1728 }
1729
1730 // If the parameter size exceeds the register area, the "offset" value
1731 // helps us calculate the stack slot for the remaining part properly.
1732 offset = RegEnd - RegBegin;
1733
1734 CCInfo.nextInRegsParam();
1735 }
1736
1737 if (Flags.getByValSize() > 4*offset) {
1738 auto PtrVT = getPointerTy(DAG.getDataLayout());
1739 unsigned LocMemOffset = VA.getLocMemOffset();
1740 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1741 SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
1742 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
1743 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
1744 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
1745 MVT::i32);
1746 SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
1747 MVT::i32);
1748
1749 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
1750 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1751 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
1752 Ops));
1753 }
1754 } else if (!isSibCall) {
1755 assert(VA.isMemLoc());
1756
1757 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1758 dl, DAG, VA, Flags));
1759 }
1760 }
1761
1762 if (!MemOpChains.empty())
1763 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1764
1765 // Build a sequence of copy-to-reg nodes chained together with token chain
1766 // and flag operands which copy the outgoing args into the appropriate regs.
1767 SDValue InFlag;
1768 // Tail call byval lowering might overwrite argument registers so in case of
1769 // tail call optimization the copies to registers are lowered later.
1770 if (!isTailCall)
1771 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1772 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1773 RegsToPass[i].second, InFlag);
1774 InFlag = Chain.getValue(1);
1775 }
1776
1777 // For tail calls lower the arguments to the 'real' stack slot.
1778 if (isTailCall) {
1779 // Force all the incoming stack arguments to be loaded from the stack
1780 // before any new outgoing arguments are stored to the stack, because the
1781 // outgoing stack slots may alias the incoming argument stack slots, and
1782 // the alias isn't otherwise explicit. This is slightly more conservative
1783 // than necessary, because it means that each store effectively depends
1784 // on every argument instead of just those arguments it would clobber.
1785
1786 // Do not flag preceding copytoreg stuff together with the following stuff.
1787 InFlag = SDValue(); 1788 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1789 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1790 RegsToPass[i].second, InFlag); 1791 InFlag = Chain.getValue(1); 1792 } 1793 InFlag = SDValue(); 1794 } 1795 1796 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1797 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1798 // node so that legalize doesn't hack it. 1799 bool isDirect = false; 1800 bool isARMFunc = false; 1801 bool isLocalARMFunc = false; 1802 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1803 auto PtrVt = getPointerTy(DAG.getDataLayout()); 1804 1805 if (Subtarget->genLongCalls()) { 1806 assert((Subtarget->isTargetWindows() || 1807 getTargetMachine().getRelocationModel() == Reloc::Static) && 1808 "long-calls with non-static relocation model!"); 1809 // Handle a global address or an external symbol. If it's not one of 1810 // those, the target's already in a register, so we don't need to do 1811 // anything extra. 1812 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1813 const GlobalValue *GV = G->getGlobal(); 1814 // Create a constant pool entry for the callee address 1815 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1816 ARMConstantPoolValue *CPV = 1817 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1818 1819 // Get the address of the callee into a register 1820 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); 1821 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1822 Callee = DAG.getLoad( 1823 PtrVt, dl, DAG.getEntryNode(), CPAddr, 1824 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 1825 false, false, 0); 1826 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1827 const char *Sym = S->getSymbol(); 1828 1829 // Create a constant pool entry for the callee address 1830 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1831 ARMConstantPoolValue *CPV = 1832 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1833 ARMPCLabelIndex, 0); 1834 // Get the address of the callee into a register 1835 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); 1836 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1837 Callee = DAG.getLoad( 1838 PtrVt, dl, DAG.getEntryNode(), CPAddr, 1839 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 1840 false, false, 0); 1841 } 1842 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1843 const GlobalValue *GV = G->getGlobal(); 1844 isDirect = true; 1845 bool isDef = GV->isStrongDefinitionForLinker(); 1846 bool isStub = (!isDef && Subtarget->isTargetMachO()) && 1847 getTargetMachine().getRelocationModel() != Reloc::Static; 1848 isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); 1849 // ARM call to a local ARM function is predicable. 1850 isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); 1851 // tBX takes a register source operand. 
1852 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1853 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); 1854 Callee = DAG.getNode( 1855 ARMISD::WrapperPIC, dl, PtrVt, 1856 DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); 1857 Callee = DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), Callee, 1858 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 1859 false, false, true, 0); 1860 } else if (Subtarget->isTargetCOFF()) { 1861 assert(Subtarget->isTargetWindows() && 1862 "Windows is the only supported COFF target"); 1863 unsigned TargetFlags = GV->hasDLLImportStorageClass() 1864 ? ARMII::MO_DLLIMPORT 1865 : ARMII::MO_NO_FLAG; 1866 Callee = 1867 DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0, TargetFlags); 1868 if (GV->hasDLLImportStorageClass()) 1869 Callee = 1870 DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), 1871 DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), 1872 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 1873 false, false, false, 0); 1874 } else { 1875 // On ELF targets for PIC code, direct calls should go through the PLT 1876 unsigned OpFlags = 0; 1877 if (Subtarget->isTargetELF() && 1878 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1879 OpFlags = ARMII::MO_PLT; 1880 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, OpFlags); 1881 } 1882 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1883 isDirect = true; 1884 bool isStub = Subtarget->isTargetMachO() && 1885 getTargetMachine().getRelocationModel() != Reloc::Static; 1886 isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); 1887 // tBX takes a register source operand. 1888 const char *Sym = S->getSymbol(); 1889 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1890 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1891 ARMConstantPoolValue *CPV = 1892 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1893 ARMPCLabelIndex, 4); 1894 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); 1895 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1896 Callee = DAG.getLoad( 1897 PtrVt, dl, DAG.getEntryNode(), CPAddr, 1898 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 1899 false, false, 0); 1900 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 1901 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); 1902 } else { 1903 unsigned OpFlags = 0; 1904 // On ELF targets for PIC code, direct calls should go through the PLT 1905 if (Subtarget->isTargetELF() && 1906 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1907 OpFlags = ARMII::MO_PLT; 1908 Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, OpFlags); 1909 } 1910 } 1911 1912 // FIXME: handle tail calls differently. 1913 unsigned CallOpc; 1914 if (Subtarget->isThumb()) { 1915 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1916 CallOpc = ARMISD::CALL_NOLINK; 1917 else 1918 CallOpc = ARMISD::CALL; 1919 } else { 1920 if (!isDirect && !Subtarget->hasV5TOps()) 1921 CallOpc = ARMISD::CALL_NOLINK; 1922 else if (doesNotRet && isDirect && Subtarget->hasRAS() && 1923 // Emit regular call when code size is the priority 1924 !MF.getFunction()->optForMinSize()) 1925 // "mov lr, pc; b _foo" to avoid confusing the RSP 1926 CallOpc = ARMISD::CALL_NOLINK; 1927 else 1928 CallOpc = isLocalARMFunc ? 
ARMISD::CALL_PRED : ARMISD::CALL;
1929 }
1930
1931 std::vector<SDValue> Ops;
1932 Ops.push_back(Chain);
1933 Ops.push_back(Callee);
1934
1935 // Add argument registers to the end of the list so that they are known live
1936 // into the call.
1937 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1938 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1939 RegsToPass[i].second.getValueType()));
1940
1941 // Add a register mask operand representing the call-preserved registers.
1942 if (!isTailCall) {
1943 const uint32_t *Mask;
1944 const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
1945 if (isThisReturn) {
1946 // For 'this' returns, use the R0-preserving mask if applicable
1947 Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
1948 if (!Mask) {
1949 // Set isThisReturn to false if the calling convention is not one that
1950 // allows 'returned' to be modeled in this way, so LowerCallResult does
1951 // not try to pass 'this' straight through
1952 isThisReturn = false;
1953 Mask = ARI->getCallPreservedMask(MF, CallConv);
1954 }
1955 } else
1956 Mask = ARI->getCallPreservedMask(MF, CallConv);
1957
1958 assert(Mask && "Missing call preserved mask for calling convention");
1959 Ops.push_back(DAG.getRegisterMask(Mask));
1960 }
1961
1962 if (InFlag.getNode())
1963 Ops.push_back(InFlag);
1964
1965 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1966 if (isTailCall) {
1967 MF.getFrameInfo()->setHasTailCall();
1968 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
1969 }
1970
1971 // Returns a chain and a flag for retval copy to use.
1972 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
1973 InFlag = Chain.getValue(1);
1974
1975 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
1976 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
1977 if (!Ins.empty())
1978 InFlag = Chain.getValue(1);
1979
1980 // Handle result values, copying them out of physregs into vregs that we
1981 // return.
1982 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
1983 InVals, isThisReturn,
1984 isThisReturn ? OutVals[0] : SDValue());
1985 }
1986
1987 /// HandleByVal - Every parameter *after* a byval parameter is passed
1988 /// on the stack. Remember the next parameter register to allocate,
1989 /// and then confiscate the rest of the parameter registers to ensure
1990 /// this.
1991 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
1992 unsigned Align) const {
1993 assert((State->getCallOrPrologue() == Prologue ||
1994 State->getCallOrPrologue() == Call) &&
1995 "unhandled ParmContext");
1996
1997 // Byval (as with any stack) slots are always at least 4 byte aligned.
1998 Align = std::max(Align, 4U);
1999
2000 unsigned Reg = State->AllocateReg(GPRArgRegs);
2001 if (!Reg)
2002 return;
2003
2004 unsigned AlignInRegs = Align / 4;
2005 unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2006 for (unsigned i = 0; i < Waste; ++i)
2007 Reg = State->AllocateReg(GPRArgRegs);
2008
2009 if (!Reg)
2010 return;
2011
2012 unsigned Excess = 4 * (ARM::R4 - Reg);
2013
2014 // Special case: when NSAA != SP and the parameter size is greater than the
2015 // size of all remaining GPR regs, we can't split the parameter; it must be
2016 // passed entirely on the stack. We also must set the NCRN to R4, so all
2017 // remaining registers are wasted.
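// Illustrative example of the check below (numbers hypothetical): with only
// r2-r3 still free (Excess == 8) and NSAAOffset != 0, a 24-byte byval cannot
// be split, so the remaining GPRs are confiscated and the whole argument is
// passed on the stack.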
2018 const unsigned NSAAOffset = State->getNextStackOffset();
2019 if (NSAAOffset != 0 && Size > Excess) {
2020 while (State->AllocateReg(GPRArgRegs))
2021 ;
2022 return;
2023 }
2024
2025 // The first register for a byval parameter is the first register that wasn't
2026 // allocated before this method call, so it would be "reg".
2027 // If the parameter is small enough to be saved in the range [reg, r4), then
2028 // the end (one past the last) register would be reg + param-size-in-regs;
2029 // otherwise the parameter is split between registers and the stack,
2030 // and the end register would be r4 in this case.
2031 unsigned ByValRegBegin = Reg;
2032 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2033 State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2034 // Note: the first register was already allocated at the beginning of this
2035 // function; allocate the remaining registers we need.
2036 for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2037 State->AllocateReg(GPRArgRegs);
2038 // A byval parameter that is split between registers and memory needs its
2039 // size truncated here.
2040 // In the case where the entire structure fits in registers, we set the
2041 // size in memory to zero.
2042 Size = std::max<int>(Size - Excess, 0);
2043 }
2044
2045 /// MatchingStackOffset - Return true if the given stack call argument is
2046 /// already available in the same position (relatively) of the caller's
2047 /// incoming argument stack.
2048 static
2049 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2050 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
2051 const TargetInstrInfo *TII) {
2052 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
2053 int FI = INT_MAX;
2054 if (Arg.getOpcode() == ISD::CopyFromReg) {
2055 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2056 if (!TargetRegisterInfo::isVirtualRegister(VR))
2057 return false;
2058 MachineInstr *Def = MRI->getVRegDef(VR);
2059 if (!Def)
2060 return false;
2061 if (!Flags.isByVal()) {
2062 if (!TII->isLoadFromStackSlot(Def, FI))
2063 return false;
2064 } else {
2065 return false;
2066 }
2067 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2068 if (Flags.isByVal())
2069 // ByVal argument is passed in as a pointer but it's now being
2070 // dereferenced. e.g.
2071 // define @foo(%struct.X* %A) {
2072 // tail call @bar(%struct.X* byval %A)
2073 // }
2074 return false;
2075 SDValue Ptr = Ld->getBasePtr();
2076 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2077 if (!FINode)
2078 return false;
2079 FI = FINode->getIndex();
2080 } else
2081 return false;
2082
2083 assert(FI != INT_MAX);
2084 if (!MFI->isFixedObjectIndex(FI))
2085 return false;
2086 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
2087 }
2088
2089 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2090 /// for tail call optimization. Targets which want to do tail call
2091 /// optimization should implement this function.
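/// A rough summary of the checks performed below (descriptive, not
/// normative): vararg calls may not pass arguments, neither side may use
/// sret, the callee must preserve every register the caller relies on, and
/// any stack-passed argument must already sit in the caller's matching
/// fixed stack slot.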
2092 bool 2093 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2094 CallingConv::ID CalleeCC, 2095 bool isVarArg, 2096 bool isCalleeStructRet, 2097 bool isCallerStructRet, 2098 const SmallVectorImpl<ISD::OutputArg> &Outs, 2099 const SmallVectorImpl<SDValue> &OutVals, 2100 const SmallVectorImpl<ISD::InputArg> &Ins, 2101 SelectionDAG& DAG) const { 2102 MachineFunction &MF = DAG.getMachineFunction(); 2103 const Function *CallerF = MF.getFunction(); 2104 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2105 2106 assert(Subtarget->supportsTailCall()); 2107 2108 // Look for obvious safe cases to perform tail call optimization that do not 2109 // require ABI changes. This is what gcc calls sibcall. 2110 2111 // Do not sibcall optimize vararg calls unless the call site is not passing 2112 // any arguments. 2113 if (isVarArg && !Outs.empty()) 2114 return false; 2115 2116 // Exception-handling functions need a special set of instructions to indicate 2117 // a return to the hardware. Tail-calling another function would probably 2118 // break this. 2119 if (CallerF->hasFnAttribute("interrupt")) 2120 return false; 2121 2122 // Also avoid sibcall optimization if either caller or callee uses struct 2123 // return semantics. 2124 if (isCalleeStructRet || isCallerStructRet) 2125 return false; 2126 2127 // Externally-defined functions with weak linkage should not be 2128 // tail-called on ARM when the OS does not support dynamic 2129 // pre-emption of symbols, as the AAELF spec requires normal calls 2130 // to undefined weak functions to be replaced with a NOP or jump to the 2131 // next instruction. The behaviour of branch instructions in this 2132 // situation (as used for tail calls) is implementation-defined, so we 2133 // cannot rely on the linker replacing the tail call with a return. 2134 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2135 const GlobalValue *GV = G->getGlobal(); 2136 const Triple &TT = getTargetMachine().getTargetTriple(); 2137 if (GV->hasExternalWeakLinkage() && 2138 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) 2139 return false; 2140 } 2141 2142 // Check that the call results are passed in the same way. 2143 LLVMContext &C = *DAG.getContext(); 2144 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, 2145 CCAssignFnForNode(CalleeCC, true, isVarArg), 2146 CCAssignFnForNode(CallerCC, true, isVarArg))) 2147 return false; 2148 // The callee has to preserve all registers the caller needs to preserve. 2149 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2150 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2151 if (CalleeCC != CallerCC) { 2152 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2153 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2154 return false; 2155 } 2156 2157 // If Caller's vararg or byval argument has been split between registers and 2158 // stack, do not perform tail call, since part of the argument is in caller's 2159 // local frame. 2160 const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); 2161 if (AFI_Caller->getArgRegsSaveSize()) 2162 return false; 2163 2164 // If the callee takes no arguments then go on to check the results of the 2165 // call. 2166 if (!Outs.empty()) { 2167 // Check if stack adjustment is needed. For now, do not do this if any 2168 // argument is passed on the stack. 
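// Illustrative instance of the check below: an outgoing i32 assigned to
// [sp, #4] is only acceptable for a sibcall if the caller's own incoming
// argument already occupies the fixed stack object at that same offset
// (see MatchingStackOffset above).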
2169 SmallVector<CCValAssign, 16> ArgLocs; 2170 ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call); 2171 CCInfo.AnalyzeCallOperands(Outs, 2172 CCAssignFnForNode(CalleeCC, false, isVarArg)); 2173 if (CCInfo.getNextStackOffset()) { 2174 // Check if the arguments are already laid out in the right way as 2175 // the caller's fixed stack objects. 2176 MachineFrameInfo *MFI = MF.getFrameInfo(); 2177 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2178 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 2179 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 2180 i != e; 2181 ++i, ++realArgIdx) { 2182 CCValAssign &VA = ArgLocs[i]; 2183 EVT RegVT = VA.getLocVT(); 2184 SDValue Arg = OutVals[realArgIdx]; 2185 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2186 if (VA.getLocInfo() == CCValAssign::Indirect) 2187 return false; 2188 if (VA.needsCustom()) { 2189 // f64 and vector types are split into multiple registers or 2190 // register/stack-slot combinations. The types will not match 2191 // the registers; give up on memory f64 refs until we figure 2192 // out what to do about this. 2193 if (!VA.isRegLoc()) 2194 return false; 2195 if (!ArgLocs[++i].isRegLoc()) 2196 return false; 2197 if (RegVT == MVT::v2f64) { 2198 if (!ArgLocs[++i].isRegLoc()) 2199 return false; 2200 if (!ArgLocs[++i].isRegLoc()) 2201 return false; 2202 } 2203 } else if (!VA.isRegLoc()) { 2204 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2205 MFI, MRI, TII)) 2206 return false; 2207 } 2208 } 2209 } 2210 2211 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2212 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) 2213 return false; 2214 } 2215 2216 return true; 2217 } 2218 2219 bool 2220 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 2221 MachineFunction &MF, bool isVarArg, 2222 const SmallVectorImpl<ISD::OutputArg> &Outs, 2223 LLVMContext &Context) const { 2224 SmallVector<CCValAssign, 16> RVLocs; 2225 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 2226 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true, 2227 isVarArg)); 2228 } 2229 2230 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, 2231 SDLoc DL, SelectionDAG &DAG) { 2232 const MachineFunction &MF = DAG.getMachineFunction(); 2233 const Function *F = MF.getFunction(); 2234 2235 StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString(); 2236 2237 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset 2238 // version of the "preferred return address". These offsets affect the return 2239 // instruction if this is a return from PL1 without hypervisor extensions. 2240 // IRQ/FIQ: +4 "subs pc, lr, #4" 2241 // SWI: 0 "subs pc, lr, #0" 2242 // ABORT: +4 "subs pc, lr, #4" 2243 // UNDEF: +4/+2 "subs pc, lr, #0" 2244 // UNDEF varies depending on where the exception came from ARM or Thumb 2245 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. 2246 2247 int64_t LROffset; 2248 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || 2249 IntKind == "ABORT") 2250 LROffset = 4; 2251 else if (IntKind == "SWI" || IntKind == "UNDEF") 2252 LROffset = 0; 2253 else 2254 report_fatal_error("Unsupported interrupt attribute. 
If present, value " 2255 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); 2256 2257 RetOps.insert(RetOps.begin() + 1, 2258 DAG.getConstant(LROffset, DL, MVT::i32, false)); 2259 2260 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); 2261 } 2262 2263 SDValue 2264 ARMTargetLowering::LowerReturn(SDValue Chain, 2265 CallingConv::ID CallConv, bool isVarArg, 2266 const SmallVectorImpl<ISD::OutputArg> &Outs, 2267 const SmallVectorImpl<SDValue> &OutVals, 2268 SDLoc dl, SelectionDAG &DAG) const { 2269 2270 // CCValAssign - represent the assignment of the return value to a location. 2271 SmallVector<CCValAssign, 16> RVLocs; 2272 2273 // CCState - Info about the registers and stack slots. 2274 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2275 *DAG.getContext(), Call); 2276 2277 // Analyze outgoing return values. 2278 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 2279 isVarArg)); 2280 2281 SDValue Flag; 2282 SmallVector<SDValue, 4> RetOps; 2283 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2284 bool isLittleEndian = Subtarget->isLittle(); 2285 2286 MachineFunction &MF = DAG.getMachineFunction(); 2287 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2288 AFI->setReturnRegsCount(RVLocs.size()); 2289 2290 // Copy the result values into the output registers. 2291 for (unsigned i = 0, realRVLocIdx = 0; 2292 i != RVLocs.size(); 2293 ++i, ++realRVLocIdx) { 2294 CCValAssign &VA = RVLocs[i]; 2295 assert(VA.isRegLoc() && "Can only return in registers!"); 2296 2297 SDValue Arg = OutVals[realRVLocIdx]; 2298 2299 switch (VA.getLocInfo()) { 2300 default: llvm_unreachable("Unknown loc info!"); 2301 case CCValAssign::Full: break; 2302 case CCValAssign::BCvt: 2303 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 2304 break; 2305 } 2306 2307 if (VA.needsCustom()) { 2308 if (VA.getLocVT() == MVT::v2f64) { 2309 // Extract the first half and return it in two registers. 2310 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2311 DAG.getConstant(0, dl, MVT::i32)); 2312 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 2313 DAG.getVTList(MVT::i32, MVT::i32), Half); 2314 2315 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2316 HalfGPRs.getValue(isLittleEndian ? 0 : 1), 2317 Flag); 2318 Flag = Chain.getValue(1); 2319 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2320 VA = RVLocs[++i]; // skip ahead to next loc 2321 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2322 HalfGPRs.getValue(isLittleEndian ? 1 : 0), 2323 Flag); 2324 Flag = Chain.getValue(1); 2325 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2326 VA = RVLocs[++i]; // skip ahead to next loc 2327 2328 // Extract the 2nd half and fall through to handle it as an f64 value. 2329 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2330 DAG.getConstant(1, dl, MVT::i32)); 2331 } 2332 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 2333 // available. 2334 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 2335 DAG.getVTList(MVT::i32, MVT::i32), Arg); 2336 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2337 fmrrd.getValue(isLittleEndian ? 0 : 1), 2338 Flag); 2339 Flag = Chain.getValue(1); 2340 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2341 VA = RVLocs[++i]; // skip ahead to next loc 2342 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2343 fmrrd.getValue(isLittleEndian ? 
1 : 0), 2344 Flag); 2345 } else 2346 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 2347 2348 // Guarantee that all emitted copies are 2349 // stuck together, avoiding something bad. 2350 Flag = Chain.getValue(1); 2351 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2352 } 2353 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2354 const MCPhysReg *I = 2355 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2356 if (I) { 2357 for (; *I; ++I) { 2358 if (ARM::GPRRegClass.contains(*I)) 2359 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2360 else if (ARM::DPRRegClass.contains(*I)) 2361 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 2362 else 2363 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2364 } 2365 } 2366 2367 // Update chain and glue. 2368 RetOps[0] = Chain; 2369 if (Flag.getNode()) 2370 RetOps.push_back(Flag); 2371 2372 // CPUs which aren't M-class use a special sequence to return from 2373 // exceptions (roughly, any instruction setting pc and cpsr simultaneously, 2374 // though we use "subs pc, lr, #N"). 2375 // 2376 // M-class CPUs actually use a normal return sequence with a special 2377 // (hardware-provided) value in LR, so the normal code path works. 2378 if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") && 2379 !Subtarget->isMClass()) { 2380 if (Subtarget->isThumb1Only()) 2381 report_fatal_error("interrupt attribute is not supported in Thumb1"); 2382 return LowerInterruptReturn(RetOps, dl, DAG); 2383 } 2384 2385 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps); 2386 } 2387 2388 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 2389 if (N->getNumValues() != 1) 2390 return false; 2391 if (!N->hasNUsesOfValue(1, 0)) 2392 return false; 2393 2394 SDValue TCChain = Chain; 2395 SDNode *Copy = *N->use_begin(); 2396 if (Copy->getOpcode() == ISD::CopyToReg) { 2397 // If the copy has a glue operand, we conservatively assume it isn't safe to 2398 // perform a tail call. 2399 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2400 return false; 2401 TCChain = Copy->getOperand(0); 2402 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 2403 SDNode *VMov = Copy; 2404 // f64 returned in a pair of GPRs. 2405 SmallPtrSet<SDNode*, 2> Copies; 2406 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2407 UI != UE; ++UI) { 2408 if (UI->getOpcode() != ISD::CopyToReg) 2409 return false; 2410 Copies.insert(*UI); 2411 } 2412 if (Copies.size() > 2) 2413 return false; 2414 2415 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2416 UI != UE; ++UI) { 2417 SDValue UseChain = UI->getOperand(0); 2418 if (Copies.count(UseChain.getNode())) 2419 // Second CopyToReg 2420 Copy = *UI; 2421 else { 2422 // We are at the top of this chain. 2423 // If the copy has a glue operand, we conservatively assume it 2424 // isn't safe to perform a tail call. 2425 if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue) 2426 return false; 2427 // First CopyToReg 2428 TCChain = UseChain; 2429 } 2430 } 2431 } else if (Copy->getOpcode() == ISD::BITCAST) { 2432 // f32 returned in a single GPR. 2433 if (!Copy->hasOneUse()) 2434 return false; 2435 Copy = *Copy->use_begin(); 2436 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2437 return false; 2438 // If the copy has a glue operand, we conservatively assume it isn't safe to 2439 // perform a tail call. 
2440 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2441 return false; 2442 TCChain = Copy->getOperand(0); 2443 } else { 2444 return false; 2445 } 2446 2447 bool HasRet = false; 2448 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2449 UI != UE; ++UI) { 2450 if (UI->getOpcode() != ARMISD::RET_FLAG && 2451 UI->getOpcode() != ARMISD::INTRET_FLAG) 2452 return false; 2453 HasRet = true; 2454 } 2455 2456 if (!HasRet) 2457 return false; 2458 2459 Chain = TCChain; 2460 return true; 2461 } 2462 2463 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2464 if (!Subtarget->supportsTailCall()) 2465 return false; 2466 2467 auto Attr = 2468 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); 2469 if (!CI->isTailCall() || Attr.getValueAsString() == "true") 2470 return false; 2471 2472 return true; 2473 } 2474 2475 // Trying to write a 64 bit value so need to split into two 32 bit values first, 2476 // and pass the lower and high parts through. 2477 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { 2478 SDLoc DL(Op); 2479 SDValue WriteValue = Op->getOperand(2); 2480 2481 // This function is only supposed to be called for i64 type argument. 2482 assert(WriteValue.getValueType() == MVT::i64 2483 && "LowerWRITE_REGISTER called for non-i64 type argument."); 2484 2485 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 2486 DAG.getConstant(0, DL, MVT::i32)); 2487 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 2488 DAG.getConstant(1, DL, MVT::i32)); 2489 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; 2490 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); 2491 } 2492 2493 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2494 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 2495 // one of the above mentioned nodes. It has to be wrapped because otherwise 2496 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 2497 // be used to form addressing mode. These wrapped nodes will be selected 2498 // into MOVi. 2499 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 2500 EVT PtrVT = Op.getValueType(); 2501 // FIXME there is no actual debug info here 2502 SDLoc dl(Op); 2503 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2504 SDValue Res; 2505 if (CP->isMachineConstantPoolEntry()) 2506 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2507 CP->getAlignment()); 2508 else 2509 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2510 CP->getAlignment()); 2511 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 2512 } 2513 2514 unsigned ARMTargetLowering::getJumpTableEncoding() const { 2515 return MachineJumpTableInfo::EK_Inline; 2516 } 2517 2518 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 2519 SelectionDAG &DAG) const { 2520 MachineFunction &MF = DAG.getMachineFunction(); 2521 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2522 unsigned ARMPCLabelIndex = 0; 2523 SDLoc DL(Op); 2524 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2525 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 2526 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2527 SDValue CPAddr; 2528 if (RelocM == Reloc::Static) { 2529 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 2530 } else { 2531 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8;
2532 ARMPCLabelIndex = AFI->createPICLabelUId();
2533 ARMConstantPoolValue *CPV =
2534 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
2535 ARMCP::CPBlockAddress, PCAdj);
2536 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2537 }
2538 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
2539 SDValue Result =
2540 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
2541 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
2542 false, false, false, 0);
2543 if (RelocM == Reloc::Static)
2544 return Result;
2545 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
2546 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
2547 }
2548
2549 /// \brief Convert a TLS address reference into the correct sequence of loads
2550 /// and calls to compute the variable's address for Darwin, and return an
2551 /// SDValue containing the final node.
2552
2553 /// Darwin only has one TLS scheme which must be capable of dealing with the
2554 /// fully general situation, in the worst case. This means:
2555 /// + "extern __thread" declaration.
2556 /// + Defined in a possibly unknown dynamic library.
2557 ///
2558 /// The general system is that each __thread variable has a [3 x i32] descriptor
2559 /// which contains information used by the runtime to calculate the address. The
2560 /// only part of this the compiler needs to know about is the first word, which
2561 /// contains a function pointer that must be called with the address of the
2562 /// entire descriptor in "r0".
2563 ///
2564 /// Since this descriptor may be in a different unit, in general access must
2565 /// proceed along the usual ARM rules. A common sequence to produce is:
2566 ///
2567 /// movw rT1, :lower16:_var$non_lazy_ptr
2568 /// movt rT1, :upper16:_var$non_lazy_ptr
2569 /// ldr r0, [rT1]
2570 /// ldr rT2, [r0]
2571 /// blx rT2
2572 /// [...address now in r0...]
2573 SDValue
2574 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
2575 SelectionDAG &DAG) const {
2576 assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
2577 SDLoc DL(Op);
2578
2579 // First step is to get the address of the actual global symbol. This is where
2580 // the TLS descriptor lives.
2581 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2582
2583 // The first entry in the descriptor is a function pointer that we must call
2584 // to obtain the address of the variable.
2585 SDValue Chain = DAG.getEntryNode();
2586 SDValue FuncTLVGet =
2587 DAG.getLoad(MVT::i32, DL, Chain, DescAddr,
2588 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2589 false, true, true, 4);
2590 Chain = FuncTLVGet.getValue(1);
2591
2592 MachineFunction &F = DAG.getMachineFunction();
2593 MachineFrameInfo *MFI = F.getFrameInfo();
2594 MFI->setAdjustsStack(true);
2595
2596 // TLS calls preserve all registers except those that absolutely must be
2597 // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
2598 // silly).
2599 auto TRI =
2600 getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
2601 auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
2602 const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
2603
2604 // Finally, we can make the call. This is just a degenerate version of a
2605 // normal ARM call node: r0 takes the address of the descriptor, and
2606 // returns the address of the variable in this thread.
2607 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); 2608 Chain = 2609 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), 2610 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), 2611 DAG.getRegisterMask(Mask), Chain.getValue(1)); 2612 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); 2613 } 2614 2615 SDValue 2616 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, 2617 SelectionDAG &DAG) const { 2618 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); 2619 SDValue Chain = DAG.getEntryNode(); 2620 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2621 SDLoc DL(Op); 2622 2623 // Load the current TEB (thread environment block) 2624 SDValue Ops[] = {Chain, 2625 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), 2626 DAG.getConstant(15, DL, MVT::i32), 2627 DAG.getConstant(0, DL, MVT::i32), 2628 DAG.getConstant(13, DL, MVT::i32), 2629 DAG.getConstant(0, DL, MVT::i32), 2630 DAG.getConstant(2, DL, MVT::i32)}; 2631 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 2632 DAG.getVTList(MVT::i32, MVT::Other), Ops); 2633 2634 SDValue TEB = CurrentTEB.getValue(0); 2635 Chain = CurrentTEB.getValue(1); 2636 2637 // Load the ThreadLocalStoragePointer from the TEB 2638 // A pointer to the TLS array is located at offset 0x2c from the TEB. 2639 SDValue TLSArray = 2640 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); 2641 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo(), 2642 false, false, false, 0); 2643 2644 // The pointer to the thread's TLS data area is at the TLS Index scaled by 4 2645 // offset into the TLSArray. 2646 2647 // Load the TLS index from the C runtime 2648 SDValue TLSIndex = 2649 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG); 2650 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); 2651 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo(), 2652 false, false, false, 0); 2653 2654 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, 2655 DAG.getConstant(2, DL, MVT::i32)); 2656 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, 2657 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), 2658 MachinePointerInfo(), false, false, false, 0); 2659 2660 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, 2661 LowerGlobalAddressWindows(Op, DAG)); 2662 } 2663 2664 // Lower ISD::GlobalTLSAddress using the "general dynamic" model 2665 SDValue 2666 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 2667 SelectionDAG &DAG) const { 2668 SDLoc dl(GA); 2669 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2670 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2671 MachineFunction &MF = DAG.getMachineFunction(); 2672 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2673 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2674 ARMConstantPoolValue *CPV = 2675 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2676 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 2677 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2678 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2679 Argument = 2680 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 2681 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), 2682 false, false, false, 0); 2683 SDValue Chain = Argument.getValue(1); 2684 2685 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2686 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2687 2688 // call __tls_get_addr. 
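// Illustrative code produced by this lowering (labels and registers are
// hypothetical; assumes the PIC constant-pool form used above):
//   ldr r0, .LCPI_tlsgd       @ TLSGD constant-pool entry for the variable
//   add r0, pc, r0            @ ARMISD::PIC_ADD
//   bl  __tls_get_addr        @ address of the variable returned in r0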
2689 ArgListTy Args; 2690 ArgListEntry Entry; 2691 Entry.Node = Argument; 2692 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2693 Args.push_back(Entry); 2694 2695 // FIXME: is there useful debug info available here? 2696 TargetLowering::CallLoweringInfo CLI(DAG); 2697 CLI.setDebugLoc(dl).setChain(Chain) 2698 .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()), 2699 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args), 2700 0); 2701 2702 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2703 return CallResult.first; 2704 } 2705 2706 // Lower ISD::GlobalTLSAddress using the "initial exec" or 2707 // "local exec" model. 2708 SDValue 2709 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2710 SelectionDAG &DAG, 2711 TLSModel::Model model) const { 2712 const GlobalValue *GV = GA->getGlobal(); 2713 SDLoc dl(GA); 2714 SDValue Offset; 2715 SDValue Chain = DAG.getEntryNode(); 2716 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2717 // Get the Thread Pointer 2718 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2719 2720 if (model == TLSModel::InitialExec) { 2721 MachineFunction &MF = DAG.getMachineFunction(); 2722 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2723 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2724 // Initial exec model. 2725 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2726 ARMConstantPoolValue *CPV = 2727 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2728 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2729 true); 2730 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2731 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2732 Offset = DAG.getLoad( 2733 PtrVT, dl, Chain, Offset, 2734 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 2735 false, false, 0); 2736 Chain = Offset.getValue(1); 2737 2738 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2739 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2740 2741 Offset = DAG.getLoad( 2742 PtrVT, dl, Chain, Offset, 2743 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 2744 false, false, 0); 2745 } else { 2746 // local exec model 2747 assert(model == TLSModel::LocalExec); 2748 ARMConstantPoolValue *CPV = 2749 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2750 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2751 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2752 Offset = DAG.getLoad( 2753 PtrVT, dl, Chain, Offset, 2754 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 2755 false, false, 0); 2756 } 2757 2758 // The address of the thread local variable is the add of the thread 2759 // pointer with the offset of the variable. 
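  // For initial-exec the offset was loaded indirectly through the GOT above;
  // for local-exec it is a link-time TPOFF constant read directly from the
  // constant pool.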
2760 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2761 } 2762 2763 SDValue 2764 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2765 if (Subtarget->isTargetDarwin()) 2766 return LowerGlobalTLSAddressDarwin(Op, DAG); 2767 2768 if (Subtarget->isTargetWindows()) 2769 return LowerGlobalTLSAddressWindows(Op, DAG); 2770 2771 // TODO: implement the "local dynamic" model 2772 assert(Subtarget->isTargetELF() && "Only ELF implemented here"); 2773 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2774 if (DAG.getTarget().Options.EmulatedTLS) 2775 return LowerToTLSEmulatedModel(GA, DAG); 2776 2777 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 2778 2779 switch (model) { 2780 case TLSModel::GeneralDynamic: 2781 case TLSModel::LocalDynamic: 2782 return LowerToTLSGeneralDynamicModel(GA, DAG); 2783 case TLSModel::InitialExec: 2784 case TLSModel::LocalExec: 2785 return LowerToTLSExecModels(GA, DAG, model); 2786 } 2787 llvm_unreachable("bogus TLS model"); 2788 } 2789 2790 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2791 SelectionDAG &DAG) const { 2792 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2793 SDLoc dl(Op); 2794 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2795 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2796 bool UseGOT_PREL = 2797 !(GV->hasHiddenVisibility() || GV->hasLocalLinkage()); 2798 2799 MachineFunction &MF = DAG.getMachineFunction(); 2800 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2801 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2802 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2803 SDLoc dl(Op); 2804 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2805 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( 2806 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, 2807 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier, 2808 /*AddCurrentAddress=*/UseGOT_PREL); 2809 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2810 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2811 SDValue Result = DAG.getLoad( 2812 PtrVT, dl, DAG.getEntryNode(), CPAddr, 2813 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 2814 false, false, 0); 2815 SDValue Chain = Result.getValue(1); 2816 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2817 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2818 if (UseGOT_PREL) 2819 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2820 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 2821 false, false, false, 0); 2822 return Result; 2823 } 2824 2825 // If we have T2 ops, we can materialize the address directly via movt/movw 2826 // pair. This is always cheaper. 2827 if (Subtarget->useMovt(DAG.getMachineFunction())) { 2828 ++NumMovwMovt; 2829 // FIXME: Once remat is capable of dealing with instructions with register 2830 // operands, expand this into two nodes. 
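  // Illustrative selection for the wrapper below (typically via the
  // MOVi32imm / t2MOVi32imm pseudos):
  //   movw rD, :lower16:sym
  //   movt rD, :upper16:sym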
2831 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2832 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2833 } else { 2834 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2835 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2836 return DAG.getLoad( 2837 PtrVT, dl, DAG.getEntryNode(), CPAddr, 2838 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 2839 false, false, 0); 2840 } 2841 } 2842 2843 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2844 SelectionDAG &DAG) const { 2845 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2846 SDLoc dl(Op); 2847 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2848 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2849 2850 if (Subtarget->useMovt(DAG.getMachineFunction())) 2851 ++NumMovwMovt; 2852 2853 // FIXME: Once remat is capable of dealing with instructions with register 2854 // operands, expand this into multiple nodes 2855 unsigned Wrapper = 2856 RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper; 2857 2858 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); 2859 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); 2860 2861 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2862 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2863 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 2864 false, false, false, 0); 2865 return Result; 2866 } 2867 2868 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, 2869 SelectionDAG &DAG) const { 2870 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); 2871 assert(Subtarget->useMovt(DAG.getMachineFunction()) && 2872 "Windows on ARM expects to use movw/movt"); 2873 2874 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2875 const ARMII::TOF TargetFlags = 2876 (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG); 2877 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2878 SDValue Result; 2879 SDLoc DL(Op); 2880 2881 ++NumMovwMovt; 2882 2883 // FIXME: Once remat is capable of dealing with instructions with register 2884 // operands, expand this into two nodes. 
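  // For dllimport symbols the movw/movt pair materializes the address of the
  // __imp_<sym> pointer, so an extra load through that pointer follows below.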
2885 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, 2886 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0, 2887 TargetFlags)); 2888 if (GV->hasDLLImportStorageClass()) 2889 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 2890 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 2891 false, false, false, 0); 2892 return Result; 2893 } 2894 2895 SDValue 2896 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2897 SDLoc dl(Op); 2898 SDValue Val = DAG.getConstant(0, dl, MVT::i32); 2899 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2900 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2901 Op.getOperand(1), Val); 2902 } 2903 2904 SDValue 2905 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2906 SDLoc dl(Op); 2907 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2908 Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); 2909 } 2910 2911 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, 2912 SelectionDAG &DAG) const { 2913 SDLoc dl(Op); 2914 return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, 2915 Op.getOperand(0)); 2916 } 2917 2918 SDValue 2919 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2920 const ARMSubtarget *Subtarget) const { 2921 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2922 SDLoc dl(Op); 2923 switch (IntNo) { 2924 default: return SDValue(); // Don't custom lower most intrinsics. 2925 case Intrinsic::arm_rbit: { 2926 assert(Op.getOperand(1).getValueType() == MVT::i32 && 2927 "RBIT intrinsic must have i32 type!"); 2928 return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1)); 2929 } 2930 case Intrinsic::thread_pointer: { 2931 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2932 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2933 } 2934 case Intrinsic::eh_sjlj_lsda: { 2935 MachineFunction &MF = DAG.getMachineFunction(); 2936 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2937 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2938 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2939 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2940 SDValue CPAddr; 2941 unsigned PCAdj = (RelocM != Reloc::PIC_) 2942 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2943 ARMConstantPoolValue *CPV = 2944 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2945 ARMCP::CPLSDA, PCAdj); 2946 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2947 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2948 SDValue Result = DAG.getLoad( 2949 PtrVT, dl, DAG.getEntryNode(), CPAddr, 2950 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false, 2951 false, false, 0); 2952 2953 if (RelocM == Reloc::PIC_) { 2954 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2955 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2956 } 2957 return Result; 2958 } 2959 case Intrinsic::arm_neon_vmulls: 2960 case Intrinsic::arm_neon_vmullu: { 2961 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2962 ? ARMISD::VMULLs : ARMISD::VMULLu; 2963 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 2964 Op.getOperand(1), Op.getOperand(2)); 2965 } 2966 case Intrinsic::arm_neon_vminnm: 2967 case Intrinsic::arm_neon_vmaxnm: { 2968 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) 2969 ? 
ISD::FMINNUM : ISD::FMAXNUM; 2970 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 2971 Op.getOperand(1), Op.getOperand(2)); 2972 } 2973 case Intrinsic::arm_neon_vminu: 2974 case Intrinsic::arm_neon_vmaxu: { 2975 if (Op.getValueType().isFloatingPoint()) 2976 return SDValue(); 2977 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) 2978 ? ISD::UMIN : ISD::UMAX; 2979 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 2980 Op.getOperand(1), Op.getOperand(2)); 2981 } 2982 case Intrinsic::arm_neon_vmins: 2983 case Intrinsic::arm_neon_vmaxs: { 2984 // v{min,max}s is overloaded between signed integers and floats. 2985 if (!Op.getValueType().isFloatingPoint()) { 2986 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) 2987 ? ISD::SMIN : ISD::SMAX; 2988 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 2989 Op.getOperand(1), Op.getOperand(2)); 2990 } 2991 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) 2992 ? ISD::FMINNAN : ISD::FMAXNAN; 2993 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 2994 Op.getOperand(1), Op.getOperand(2)); 2995 } 2996 } 2997 } 2998 2999 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 3000 const ARMSubtarget *Subtarget) { 3001 // FIXME: handle "fence singlethread" more efficiently. 3002 SDLoc dl(Op); 3003 if (!Subtarget->hasDataBarrier()) { 3004 // Some ARMv6 cpus can support data barriers with an mcr instruction. 3005 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 3006 // here. 3007 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 3008 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); 3009 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 3010 DAG.getConstant(0, dl, MVT::i32)); 3011 } 3012 3013 ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); 3014 AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); 3015 ARM_MB::MemBOpt Domain = ARM_MB::ISH; 3016 if (Subtarget->isMClass()) { 3017 // Only a full system barrier exists in the M-class architectures. 3018 Domain = ARM_MB::SY; 3019 } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) { 3020 // Swift happens to implement ISHST barriers in a way that's compatible with 3021 // Release semantics but weaker than ISH so we'd be fools not to use 3022 // it. Beware: other processors probably don't! 3023 Domain = ARM_MB::ISHST; 3024 } 3025 3026 return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), 3027 DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), 3028 DAG.getConstant(Domain, dl, MVT::i32)); 3029 } 3030 3031 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 3032 const ARMSubtarget *Subtarget) { 3033 // ARM pre v5TE and Thumb1 does not have preload instructions. 3034 if (!(Subtarget->isThumb2() || 3035 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 3036 // Just preserve the chain. 3037 return Op.getOperand(0); 3038 3039 SDLoc dl(Op); 3040 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 3041 if (!isRead && 3042 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 3043 // ARMv7 with MP extension has PLDW. 3044 return Op.getOperand(0); 3045 3046 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 3047 if (Subtarget->isThumb()) { 3048 // Invert the bits. 
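    // (The Thumb preload patterns appear to expect the opposite polarity for
    // these two flags from the ARM-mode patterns, hence the flips below.)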
3049 isRead = ~isRead & 1; 3050 isData = ~isData & 1; 3051 } 3052 3053 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 3054 Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), 3055 DAG.getConstant(isData, dl, MVT::i32)); 3056 } 3057 3058 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 3059 MachineFunction &MF = DAG.getMachineFunction(); 3060 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 3061 3062 // vastart just stores the address of the VarArgsFrameIndex slot into the 3063 // memory location argument. 3064 SDLoc dl(Op); 3065 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 3066 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3067 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3068 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3069 MachinePointerInfo(SV), false, false, 0); 3070 } 3071 3072 SDValue 3073 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 3074 SDValue &Root, SelectionDAG &DAG, 3075 SDLoc dl) const { 3076 MachineFunction &MF = DAG.getMachineFunction(); 3077 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3078 3079 const TargetRegisterClass *RC; 3080 if (AFI->isThumb1OnlyFunction()) 3081 RC = &ARM::tGPRRegClass; 3082 else 3083 RC = &ARM::GPRRegClass; 3084 3085 // Transform the arguments stored in physical registers into virtual ones. 3086 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3087 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 3088 3089 SDValue ArgValue2; 3090 if (NextVA.isMemLoc()) { 3091 MachineFrameInfo *MFI = MF.getFrameInfo(); 3092 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 3093 3094 // Create load node to retrieve arguments from the stack. 3095 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 3096 ArgValue2 = DAG.getLoad( 3097 MVT::i32, dl, Root, FIN, 3098 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false, 3099 false, false, 0); 3100 } else { 3101 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 3102 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 3103 } 3104 if (!Subtarget->isLittle()) 3105 std::swap (ArgValue, ArgValue2); 3106 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 3107 } 3108 3109 // The remaining GPRs hold either the beginning of variable-argument 3110 // data, or the beginning of an aggregate passed by value (usually 3111 // byval). Either way, we allocate stack slots adjacent to the data 3112 // provided by our caller, and store the unallocated registers there. 3113 // If this is a variadic function, the va_list pointer will begin with 3114 // these values; otherwise, this reassembles a (byval) structure that 3115 // was split between registers and memory. 3116 // Return: The frame index registers were stored into. 3117 int 3118 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, 3119 SDLoc dl, SDValue &Chain, 3120 const Value *OrigArg, 3121 unsigned InRegsParamRecordIdx, 3122 int ArgOffset, 3123 unsigned ArgSize) const { 3124 // Currently, two use-cases possible: 3125 // Case #1. Non-var-args function, and we meet first byval parameter. 3126 // Setup first unallocated register as first byval register; 3127 // eat all remained registers 3128 // (these two actions are performed by HandleByVal method). 3129 // Then, here, we initialize stack frame with 3130 // "store-reg" instructions. 3131 // Case #2. 
Var-args function that doesn't contain byval parameters.
3132   //          The same: eat all remaining unallocated registers,
3133   //          initialize stack frame.
3134
3135   MachineFunction &MF = DAG.getMachineFunction();
3136   MachineFrameInfo *MFI = MF.getFrameInfo();
3137   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3138   unsigned RBegin, REnd;
3139   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
3140     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
3141   } else {
3142     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3143     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
3144     REnd = ARM::R4;
3145   }
3146
3147   if (REnd != RBegin)
3148     ArgOffset = -4 * (ARM::R4 - RBegin);
3149
3150   auto PtrVT = getPointerTy(DAG.getDataLayout());
3151   int FrameIndex = MFI->CreateFixedObject(ArgSize, ArgOffset, false);
3152   SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
3153
3154   SmallVector<SDValue, 4> MemOps;
3155   const TargetRegisterClass *RC =
3156       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
3157
3158   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
3159     unsigned VReg = MF.addLiveIn(Reg, RC);
3160     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3161     SDValue Store =
3162         DAG.getStore(Val.getValue(1), dl, Val, FIN,
3163                      MachinePointerInfo(OrigArg, 4 * i), false, false, 0);
3164     MemOps.push_back(Store);
3165     FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
3166   }
3167
3168   if (!MemOps.empty())
3169     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3170   return FrameIndex;
3171 }
3172
3173 // Set up the stack frame that the va_list pointer will start from.
3174 void
3175 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3176                                         SDLoc dl, SDValue &Chain,
3177                                         unsigned ArgOffset,
3178                                         unsigned TotalArgRegsSaveSize,
3179                                         bool ForceMutable) const {
3180   MachineFunction &MF = DAG.getMachineFunction();
3181   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3182
3183   // Try to store any remaining integer argument regs
3184   // to their spots on the stack so that they may be loaded by dereferencing
3185   // the result of va_next.
3186   // If there are no regs to be stored, just point the address past the last
3187   // argument passed via the stack.
3188   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3189                                   CCInfo.getInRegsParamsCount(),
3190                                   CCInfo.getNextStackOffset(), 4);
3191   AFI->setVarArgsFrameIndex(FrameIndex);
3192 }
3193
3194 SDValue
3195 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
3196                                         CallingConv::ID CallConv, bool isVarArg,
3197                                         const SmallVectorImpl<ISD::InputArg>
3198                                           &Ins,
3199                                         SDLoc dl, SelectionDAG &DAG,
3200                                         SmallVectorImpl<SDValue> &InVals)
3201                                           const {
3202   MachineFunction &MF = DAG.getMachineFunction();
3203   MachineFrameInfo *MFI = MF.getFrameInfo();
3204
3205   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3206
3207   // Assign locations to all of the incoming arguments.
3208   SmallVector<CCValAssign, 16> ArgLocs;
3209   ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3210                     *DAG.getContext(), Prologue);
3211   CCInfo.AnalyzeFormalArguments(Ins,
3212                                 CCAssignFnForNode(CallConv, /* Return*/ false,
3213                                                   isVarArg));
3214
3215   SmallVector<SDValue, 16> ArgValues;
3216   SDValue ArgValue;
3217   Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
3218   unsigned CurArgIdx = 0;
3219
3220   // Initially ArgRegsSaveSize is zero.
3221   // Then we increase this value each time we meet a byval parameter.
3222 // We also increase this value in case of varargs function. 3223 AFI->setArgRegsSaveSize(0); 3224 3225 // Calculate the amount of stack space that we need to allocate to store 3226 // byval and variadic arguments that are passed in registers. 3227 // We need to know this before we allocate the first byval or variadic 3228 // argument, as they will be allocated a stack slot below the CFA (Canonical 3229 // Frame Address, the stack pointer at entry to the function). 3230 unsigned ArgRegBegin = ARM::R4; 3231 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3232 if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) 3233 break; 3234 3235 CCValAssign &VA = ArgLocs[i]; 3236 unsigned Index = VA.getValNo(); 3237 ISD::ArgFlagsTy Flags = Ins[Index].Flags; 3238 if (!Flags.isByVal()) 3239 continue; 3240 3241 assert(VA.isMemLoc() && "unexpected byval pointer in reg"); 3242 unsigned RBegin, REnd; 3243 CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); 3244 ArgRegBegin = std::min(ArgRegBegin, RBegin); 3245 3246 CCInfo.nextInRegsParam(); 3247 } 3248 CCInfo.rewindByValRegsInfo(); 3249 3250 int lastInsIndex = -1; 3251 if (isVarArg && MFI->hasVAStart()) { 3252 unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); 3253 if (RegIdx != array_lengthof(GPRArgRegs)) 3254 ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); 3255 } 3256 3257 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); 3258 AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); 3259 auto PtrVT = getPointerTy(DAG.getDataLayout()); 3260 3261 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3262 CCValAssign &VA = ArgLocs[i]; 3263 if (Ins[VA.getValNo()].isOrigArg()) { 3264 std::advance(CurOrigArg, 3265 Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); 3266 CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); 3267 } 3268 // Arguments stored in registers. 3269 if (VA.isRegLoc()) { 3270 EVT RegVT = VA.getLocVT(); 3271 3272 if (VA.needsCustom()) { 3273 // f64 and vector types are split up into multiple registers or 3274 // combinations of registers and stack slots. 3275 if (VA.getLocVT() == MVT::v2f64) { 3276 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 3277 Chain, DAG, dl); 3278 VA = ArgLocs[++i]; // skip ahead to next loc 3279 SDValue ArgValue2; 3280 if (VA.isMemLoc()) { 3281 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 3282 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3283 ArgValue2 = DAG.getLoad( 3284 MVT::f64, dl, Chain, FIN, 3285 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 3286 false, false, false, 0); 3287 } else { 3288 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 3289 Chain, DAG, dl); 3290 } 3291 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 3292 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 3293 ArgValue, ArgValue1, 3294 DAG.getIntPtrConstant(0, dl)); 3295 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 3296 ArgValue, ArgValue2, 3297 DAG.getIntPtrConstant(1, dl)); 3298 } else 3299 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 3300 3301 } else { 3302 const TargetRegisterClass *RC; 3303 3304 if (RegVT == MVT::f32) 3305 RC = &ARM::SPRRegClass; 3306 else if (RegVT == MVT::f64) 3307 RC = &ARM::DPRRegClass; 3308 else if (RegVT == MVT::v2f64) 3309 RC = &ARM::QPRRegClass; 3310 else if (RegVT == MVT::i32) 3311 RC = AFI->isThumb1OnlyFunction() ? 
&ARM::tGPRRegClass 3312 : &ARM::GPRRegClass; 3313 else 3314 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 3315 3316 // Transform the arguments in physical registers into virtual ones. 3317 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3318 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 3319 } 3320 3321 // If this is an 8 or 16-bit value, it is really passed promoted 3322 // to 32 bits. Insert an assert[sz]ext to capture this, then 3323 // truncate to the right size. 3324 switch (VA.getLocInfo()) { 3325 default: llvm_unreachable("Unknown loc info!"); 3326 case CCValAssign::Full: break; 3327 case CCValAssign::BCvt: 3328 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 3329 break; 3330 case CCValAssign::SExt: 3331 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 3332 DAG.getValueType(VA.getValVT())); 3333 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 3334 break; 3335 case CCValAssign::ZExt: 3336 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 3337 DAG.getValueType(VA.getValVT())); 3338 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 3339 break; 3340 } 3341 3342 InVals.push_back(ArgValue); 3343 3344 } else { // VA.isRegLoc() 3345 3346 // sanity check 3347 assert(VA.isMemLoc()); 3348 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 3349 3350 int index = VA.getValNo(); 3351 3352 // Some Ins[] entries become multiple ArgLoc[] entries. 3353 // Process them only once. 3354 if (index != lastInsIndex) 3355 { 3356 ISD::ArgFlagsTy Flags = Ins[index].Flags; 3357 // FIXME: For now, all byval parameter objects are marked mutable. 3358 // This can be changed with more analysis. 3359 // In case of tail call optimization mark all arguments mutable. 3360 // Since they could be overwritten by lowering of arguments in case of 3361 // a tail call. 3362 if (Flags.isByVal()) { 3363 assert(Ins[index].isOrigArg() && 3364 "Byval arguments cannot be implicit"); 3365 unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); 3366 3367 int FrameIndex = StoreByValRegs( 3368 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, 3369 VA.getLocMemOffset(), Flags.getByValSize()); 3370 InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); 3371 CCInfo.nextInRegsParam(); 3372 } else { 3373 unsigned FIOffset = VA.getLocMemOffset(); 3374 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 3375 FIOffset, true); 3376 3377 // Create load nodes to retrieve arguments from the stack. 3378 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3379 InVals.push_back(DAG.getLoad( 3380 VA.getValVT(), dl, Chain, FIN, 3381 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 3382 false, false, false, 0)); 3383 } 3384 lastInsIndex = index; 3385 } 3386 } 3387 } 3388 3389 // varargs 3390 if (isVarArg && MFI->hasVAStart()) 3391 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 3392 CCInfo.getNextStackOffset(), 3393 TotalArgRegsSaveSize); 3394 3395 AFI->setArgumentStackSize(CCInfo.getNextStackOffset()); 3396 3397 return Chain; 3398 } 3399 3400 /// isFloatingPointZero - Return true if this is +0.0. 3401 static bool isFloatingPointZero(SDValue Op) { 3402 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 3403 return CFP->getValueAPF().isPosZero(); 3404 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 3405 // Maybe this has already been legalized into the constant pool? 
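    // For a load, operand 1 is the base pointer; an ARMISD::Wrapper around a
    // constant-pool node means the FP constant was placed in the constant
    // pool during legalization.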
3406 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 3407 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 3408 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 3409 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 3410 return CFP->getValueAPF().isPosZero(); 3411 } 3412 } else if (Op->getOpcode() == ISD::BITCAST && 3413 Op->getValueType(0) == MVT::f64) { 3414 // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) 3415 // created by LowerConstantFP(). 3416 SDValue BitcastOp = Op->getOperand(0); 3417 if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && 3418 isNullConstant(BitcastOp->getOperand(0))) 3419 return true; 3420 } 3421 return false; 3422 } 3423 3424 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for 3425 /// the given operands. 3426 SDValue 3427 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 3428 SDValue &ARMcc, SelectionDAG &DAG, 3429 SDLoc dl) const { 3430 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 3431 unsigned C = RHSC->getZExtValue(); 3432 if (!isLegalICmpImmediate(C)) { 3433 // Constant does not fit, try adjusting it by one? 3434 switch (CC) { 3435 default: break; 3436 case ISD::SETLT: 3437 case ISD::SETGE: 3438 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 3439 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 3440 RHS = DAG.getConstant(C - 1, dl, MVT::i32); 3441 } 3442 break; 3443 case ISD::SETULT: 3444 case ISD::SETUGE: 3445 if (C != 0 && isLegalICmpImmediate(C-1)) { 3446 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 3447 RHS = DAG.getConstant(C - 1, dl, MVT::i32); 3448 } 3449 break; 3450 case ISD::SETLE: 3451 case ISD::SETGT: 3452 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 3453 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 3454 RHS = DAG.getConstant(C + 1, dl, MVT::i32); 3455 } 3456 break; 3457 case ISD::SETULE: 3458 case ISD::SETUGT: 3459 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 3460 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 3461 RHS = DAG.getConstant(C + 1, dl, MVT::i32); 3462 } 3463 break; 3464 } 3465 } 3466 } 3467 3468 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3469 ARMISD::NodeType CompareType; 3470 switch (CondCode) { 3471 default: 3472 CompareType = ARMISD::CMP; 3473 break; 3474 case ARMCC::EQ: 3475 case ARMCC::NE: 3476 // Uses only Z Flag 3477 CompareType = ARMISD::CMPZ; 3478 break; 3479 } 3480 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3481 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 3482 } 3483 3484 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 3485 SDValue 3486 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 3487 SDLoc dl) const { 3488 assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64); 3489 SDValue Cmp; 3490 if (!isFloatingPointZero(RHS)) 3491 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 3492 else 3493 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 3494 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 3495 } 3496 3497 /// duplicateCmp - Glue values can have only one use, so this function 3498 /// duplicates a comparison node. 
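/// This is needed, for example, when a single compare must feed two CMOVs,
/// as when an f64 select is split into a pair of i32 CMOVs on FP-only-SP
/// subtargets (see getCMOV below).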
3499 SDValue 3500 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 3501 unsigned Opc = Cmp.getOpcode(); 3502 SDLoc DL(Cmp); 3503 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 3504 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3505 3506 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 3507 Cmp = Cmp.getOperand(0); 3508 Opc = Cmp.getOpcode(); 3509 if (Opc == ARMISD::CMPFP) 3510 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3511 else { 3512 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 3513 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 3514 } 3515 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 3516 } 3517 3518 std::pair<SDValue, SDValue> 3519 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, 3520 SDValue &ARMcc) const { 3521 assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); 3522 3523 SDValue Value, OverflowCmp; 3524 SDValue LHS = Op.getOperand(0); 3525 SDValue RHS = Op.getOperand(1); 3526 SDLoc dl(Op); 3527 3528 // FIXME: We are currently always generating CMPs because we don't support 3529 // generating CMN through the backend. This is not as good as the natural 3530 // CMP case because it causes a register dependency and cannot be folded 3531 // later. 3532 3533 switch (Op.getOpcode()) { 3534 default: 3535 llvm_unreachable("Unknown overflow instruction!"); 3536 case ISD::SADDO: 3537 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3538 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3539 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3540 break; 3541 case ISD::UADDO: 3542 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3543 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3544 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3545 break; 3546 case ISD::SSUBO: 3547 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3548 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3549 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3550 break; 3551 case ISD::USUBO: 3552 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3553 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3554 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3555 break; 3556 } // switch (...) 3557 3558 return std::make_pair(Value, OverflowCmp); 3559 } 3560 3561 3562 SDValue 3563 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 3564 // Let legalize expand this if it isn't a legal type yet. 3565 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 3566 return SDValue(); 3567 3568 SDValue Value, OverflowCmp; 3569 SDValue ARMcc; 3570 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); 3571 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3572 SDLoc dl(Op); 3573 // We use 0 and 1 as false and true values. 
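  // ARMcc was set above to the *no overflow* condition (VC or HS); since
  // ARMISD::CMOV selects its second value operand when the condition holds,
  // the CMOV below yields 0 when there is no overflow and 1 otherwise.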
3574 SDValue TVal = DAG.getConstant(1, dl, MVT::i32); 3575 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); 3576 EVT VT = Op.getValueType(); 3577 3578 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, 3579 ARMcc, CCR, OverflowCmp); 3580 3581 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 3582 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 3583 } 3584 3585 3586 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3587 SDValue Cond = Op.getOperand(0); 3588 SDValue SelectTrue = Op.getOperand(1); 3589 SDValue SelectFalse = Op.getOperand(2); 3590 SDLoc dl(Op); 3591 unsigned Opc = Cond.getOpcode(); 3592 3593 if (Cond.getResNo() == 1 && 3594 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 3595 Opc == ISD::USUBO)) { 3596 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) 3597 return SDValue(); 3598 3599 SDValue Value, OverflowCmp; 3600 SDValue ARMcc; 3601 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); 3602 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3603 EVT VT = Op.getValueType(); 3604 3605 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, 3606 OverflowCmp, DAG); 3607 } 3608 3609 // Convert: 3610 // 3611 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 3612 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 3613 // 3614 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 3615 const ConstantSDNode *CMOVTrue = 3616 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 3617 const ConstantSDNode *CMOVFalse = 3618 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 3619 3620 if (CMOVTrue && CMOVFalse) { 3621 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 3622 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 3623 3624 SDValue True; 3625 SDValue False; 3626 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 3627 True = SelectTrue; 3628 False = SelectFalse; 3629 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 3630 True = SelectFalse; 3631 False = SelectTrue; 3632 } 3633 3634 if (True.getNode() && False.getNode()) { 3635 EVT VT = Op.getValueType(); 3636 SDValue ARMcc = Cond.getOperand(2); 3637 SDValue CCR = Cond.getOperand(3); 3638 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 3639 assert(True.getValueType() == VT); 3640 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); 3641 } 3642 } 3643 } 3644 3645 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 3646 // undefined bits before doing a full-word comparison with zero. 3647 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 3648 DAG.getConstant(1, dl, Cond.getValueType())); 3649 3650 return DAG.getSelectCC(dl, Cond, 3651 DAG.getConstant(0, dl, Cond.getValueType()), 3652 SelectTrue, SelectFalse, ISD::SETNE); 3653 } 3654 3655 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 3656 bool &swpCmpOps, bool &swpVselOps) { 3657 // Start by selecting the GE condition code for opcodes that return true for 3658 // 'equality' 3659 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || 3660 CC == ISD::SETULE) 3661 CondCode = ARMCC::GE; 3662 3663 // and GT for opcodes that return false for 'equality'. 3664 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || 3665 CC == ISD::SETULT) 3666 CondCode = ARMCC::GT; 3667 3668 // Since we are constrained to GE/GT, if the opcode contains 'less', we need 3669 // to swap the compare operands. 
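  // For example, (a setole b) is handled as (b setoge a): the VCMP operands
  // are swapped and the GE condition is used.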
3670 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || 3671 CC == ISD::SETULT) 3672 swpCmpOps = true; 3673 3674 // Both GT and GE are ordered comparisons, and return false for 'unordered'. 3675 // If we have an unordered opcode, we need to swap the operands to the VSEL 3676 // instruction (effectively negating the condition). 3677 // 3678 // This also has the effect of swapping which one of 'less' or 'greater' 3679 // returns true, so we also swap the compare operands. It also switches 3680 // whether we return true for 'equality', so we compensate by picking the 3681 // opposite condition code to our original choice. 3682 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || 3683 CC == ISD::SETUGT) { 3684 swpCmpOps = !swpCmpOps; 3685 swpVselOps = !swpVselOps; 3686 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; 3687 } 3688 3689 // 'ordered' is 'anything but unordered', so use the VS condition code and 3690 // swap the VSEL operands. 3691 if (CC == ISD::SETO) { 3692 CondCode = ARMCC::VS; 3693 swpVselOps = true; 3694 } 3695 3696 // 'unordered or not equal' is 'anything but equal', so use the EQ condition 3697 // code and swap the VSEL operands. 3698 if (CC == ISD::SETUNE) { 3699 CondCode = ARMCC::EQ; 3700 swpVselOps = true; 3701 } 3702 } 3703 3704 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, 3705 SDValue TrueVal, SDValue ARMcc, SDValue CCR, 3706 SDValue Cmp, SelectionDAG &DAG) const { 3707 if (Subtarget->isFPOnlySP() && VT == MVT::f64) { 3708 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, 3709 DAG.getVTList(MVT::i32, MVT::i32), FalseVal); 3710 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, 3711 DAG.getVTList(MVT::i32, MVT::i32), TrueVal); 3712 3713 SDValue TrueLow = TrueVal.getValue(0); 3714 SDValue TrueHigh = TrueVal.getValue(1); 3715 SDValue FalseLow = FalseVal.getValue(0); 3716 SDValue FalseHigh = FalseVal.getValue(1); 3717 3718 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, 3719 ARMcc, CCR, Cmp); 3720 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, 3721 ARMcc, CCR, duplicateCmp(Cmp, DAG)); 3722 3723 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); 3724 } else { 3725 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, 3726 Cmp); 3727 } 3728 } 3729 3730 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 3731 EVT VT = Op.getValueType(); 3732 SDValue LHS = Op.getOperand(0); 3733 SDValue RHS = Op.getOperand(1); 3734 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 3735 SDValue TrueVal = Op.getOperand(2); 3736 SDValue FalseVal = Op.getOperand(3); 3737 SDLoc dl(Op); 3738 3739 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { 3740 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, 3741 dl); 3742 3743 // If softenSetCCOperands only returned one value, we should compare it to 3744 // zero. 3745 if (!RHS.getNode()) { 3746 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 3747 CC = ISD::SETNE; 3748 } 3749 } 3750 3751 if (LHS.getValueType() == MVT::i32) { 3752 // Try to generate VSEL on ARMv8. 3753 // The VSEL instruction can't use all the usual ARM condition 3754 // codes: it only has two bits to select the condition code, so it's 3755 // constrained to use only GE, GT, VS and EQ. 
3756 // 3757 // To implement all the various ISD::SETXXX opcodes, we sometimes need to 3758 // swap the operands of the previous compare instruction (effectively 3759 // inverting the compare condition, swapping 'less' and 'greater') and 3760 // sometimes need to swap the operands to the VSEL (which inverts the 3761 // condition in the sense of firing whenever the previous condition didn't) 3762 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 3763 TrueVal.getValueType() == MVT::f64)) { 3764 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3765 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || 3766 CondCode == ARMCC::VC || CondCode == ARMCC::NE) { 3767 CC = ISD::getSetCCInverse(CC, true); 3768 std::swap(TrueVal, FalseVal); 3769 } 3770 } 3771 3772 SDValue ARMcc; 3773 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3774 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3775 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 3776 } 3777 3778 ARMCC::CondCodes CondCode, CondCode2; 3779 FPCCToARMCC(CC, CondCode, CondCode2); 3780 3781 // Try to generate VMAXNM/VMINNM on ARMv8. 3782 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 3783 TrueVal.getValueType() == MVT::f64)) { 3784 bool swpCmpOps = false; 3785 bool swpVselOps = false; 3786 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); 3787 3788 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || 3789 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { 3790 if (swpCmpOps) 3791 std::swap(LHS, RHS); 3792 if (swpVselOps) 3793 std::swap(TrueVal, FalseVal); 3794 } 3795 } 3796 3797 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3798 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3799 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3800 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 3801 if (CondCode2 != ARMCC::AL) { 3802 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); 3803 // FIXME: Needs another CMP because flag can have but one use. 3804 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 3805 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); 3806 } 3807 return Result; 3808 } 3809 3810 /// canChangeToInt - Given the fp compare operand, return true if it is suitable 3811 /// to morph to an integer compare sequence. 3812 static bool canChangeToInt(SDValue Op, bool &SeenZero, 3813 const ARMSubtarget *Subtarget) { 3814 SDNode *N = Op.getNode(); 3815 if (!N->hasOneUse()) 3816 // Otherwise it requires moving the value from fp to integer registers. 3817 return false; 3818 if (!N->getNumValues()) 3819 return false; 3820 EVT VT = Op.getValueType(); 3821 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 3822 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 3823 // vmrs are very slow, e.g. cortex-a8. 
3824 return false; 3825 3826 if (isFloatingPointZero(Op)) { 3827 SeenZero = true; 3828 return true; 3829 } 3830 return ISD::isNormalLoad(N); 3831 } 3832 3833 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 3834 if (isFloatingPointZero(Op)) 3835 return DAG.getConstant(0, SDLoc(Op), MVT::i32); 3836 3837 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 3838 return DAG.getLoad(MVT::i32, SDLoc(Op), 3839 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 3840 Ld->isVolatile(), Ld->isNonTemporal(), 3841 Ld->isInvariant(), Ld->getAlignment()); 3842 3843 llvm_unreachable("Unknown VFP cmp argument!"); 3844 } 3845 3846 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 3847 SDValue &RetVal1, SDValue &RetVal2) { 3848 SDLoc dl(Op); 3849 3850 if (isFloatingPointZero(Op)) { 3851 RetVal1 = DAG.getConstant(0, dl, MVT::i32); 3852 RetVal2 = DAG.getConstant(0, dl, MVT::i32); 3853 return; 3854 } 3855 3856 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 3857 SDValue Ptr = Ld->getBasePtr(); 3858 RetVal1 = DAG.getLoad(MVT::i32, dl, 3859 Ld->getChain(), Ptr, 3860 Ld->getPointerInfo(), 3861 Ld->isVolatile(), Ld->isNonTemporal(), 3862 Ld->isInvariant(), Ld->getAlignment()); 3863 3864 EVT PtrType = Ptr.getValueType(); 3865 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 3866 SDValue NewPtr = DAG.getNode(ISD::ADD, dl, 3867 PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); 3868 RetVal2 = DAG.getLoad(MVT::i32, dl, 3869 Ld->getChain(), NewPtr, 3870 Ld->getPointerInfo().getWithOffset(4), 3871 Ld->isVolatile(), Ld->isNonTemporal(), 3872 Ld->isInvariant(), NewAlign); 3873 return; 3874 } 3875 3876 llvm_unreachable("Unknown VFP cmp argument!"); 3877 } 3878 3879 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 3880 /// f32 and even f64 comparisons to integer ones. 3881 SDValue 3882 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 3883 SDValue Chain = Op.getOperand(0); 3884 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3885 SDValue LHS = Op.getOperand(2); 3886 SDValue RHS = Op.getOperand(3); 3887 SDValue Dest = Op.getOperand(4); 3888 SDLoc dl(Op); 3889 3890 bool LHSSeenZero = false; 3891 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 3892 bool RHSSeenZero = false; 3893 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 3894 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 3895 // If unsafe fp math optimization is enabled and there are no other uses of 3896 // the CMP operands, and the condition code is EQ or NE, we can optimize it 3897 // to an integer comparison. 
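    // For example, (x == 0.0f) becomes ((bitcast(x) & 0x7fffffff) == 0), so
    // that -0.0 and +0.0 compare equal.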
3898 if (CC == ISD::SETOEQ) 3899 CC = ISD::SETEQ; 3900 else if (CC == ISD::SETUNE) 3901 CC = ISD::SETNE; 3902 3903 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); 3904 SDValue ARMcc; 3905 if (LHS.getValueType() == MVT::f32) { 3906 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3907 bitcastf32Toi32(LHS, DAG), Mask); 3908 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3909 bitcastf32Toi32(RHS, DAG), Mask); 3910 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3911 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3912 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3913 Chain, Dest, ARMcc, CCR, Cmp); 3914 } 3915 3916 SDValue LHS1, LHS2; 3917 SDValue RHS1, RHS2; 3918 expandf64Toi32(LHS, DAG, LHS1, LHS2); 3919 expandf64Toi32(RHS, DAG, RHS1, RHS2); 3920 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 3921 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 3922 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3923 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3924 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3925 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 3926 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); 3927 } 3928 3929 return SDValue(); 3930 } 3931 3932 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 3933 SDValue Chain = Op.getOperand(0); 3934 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3935 SDValue LHS = Op.getOperand(2); 3936 SDValue RHS = Op.getOperand(3); 3937 SDValue Dest = Op.getOperand(4); 3938 SDLoc dl(Op); 3939 3940 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { 3941 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, 3942 dl); 3943 3944 // If softenSetCCOperands only returned one value, we should compare it to 3945 // zero. 
3946 if (!RHS.getNode()) { 3947 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 3948 CC = ISD::SETNE; 3949 } 3950 } 3951 3952 if (LHS.getValueType() == MVT::i32) { 3953 SDValue ARMcc; 3954 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3955 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3956 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3957 Chain, Dest, ARMcc, CCR, Cmp); 3958 } 3959 3960 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 3961 3962 if (getTargetMachine().Options.UnsafeFPMath && 3963 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 3964 CC == ISD::SETNE || CC == ISD::SETUNE)) { 3965 if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) 3966 return Result; 3967 } 3968 3969 ARMCC::CondCodes CondCode, CondCode2; 3970 FPCCToARMCC(CC, CondCode, CondCode2); 3971 3972 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3973 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3974 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3975 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3976 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 3977 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); 3978 if (CondCode2 != ARMCC::AL) { 3979 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); 3980 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 3981 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); 3982 } 3983 return Res; 3984 } 3985 3986 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 3987 SDValue Chain = Op.getOperand(0); 3988 SDValue Table = Op.getOperand(1); 3989 SDValue Index = Op.getOperand(2); 3990 SDLoc dl(Op); 3991 3992 EVT PTy = getPointerTy(DAG.getDataLayout()); 3993 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 3994 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 3995 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); 3996 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); 3997 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3998 if (Subtarget->isThumb2()) { 3999 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 4000 // which does another jump to the destination. This also makes it easier 4001 // to translate it to TBB / TBH later. 4002 // FIXME: This might not work if the function is extremely large. 
4003 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 4004 Addr, Op.getOperand(2), JTI); 4005 } 4006 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 4007 Addr = 4008 DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 4009 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()), 4010 false, false, false, 0); 4011 Chain = Addr.getValue(1); 4012 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 4013 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); 4014 } else { 4015 Addr = 4016 DAG.getLoad(PTy, dl, Chain, Addr, 4017 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()), 4018 false, false, false, 0); 4019 Chain = Addr.getValue(1); 4020 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); 4021 } 4022 } 4023 4024 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 4025 EVT VT = Op.getValueType(); 4026 SDLoc dl(Op); 4027 4028 if (Op.getValueType().getVectorElementType() == MVT::i32) { 4029 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 4030 return Op; 4031 return DAG.UnrollVectorOp(Op.getNode()); 4032 } 4033 4034 assert(Op.getOperand(0).getValueType() == MVT::v4f32 && 4035 "Invalid type for custom lowering!"); 4036 if (VT != MVT::v4i16) 4037 return DAG.UnrollVectorOp(Op.getNode()); 4038 4039 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); 4040 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 4041 } 4042 4043 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { 4044 EVT VT = Op.getValueType(); 4045 if (VT.isVector()) 4046 return LowerVectorFP_TO_INT(Op, DAG); 4047 if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) { 4048 RTLIB::Libcall LC; 4049 if (Op.getOpcode() == ISD::FP_TO_SINT) 4050 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), 4051 Op.getValueType()); 4052 else 4053 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), 4054 Op.getValueType()); 4055 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), 4056 /*isSigned*/ false, SDLoc(Op)).first; 4057 } 4058 4059 return Op; 4060 } 4061 4062 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 4063 EVT VT = Op.getValueType(); 4064 SDLoc dl(Op); 4065 4066 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 4067 if (VT.getVectorElementType() == MVT::f32) 4068 return Op; 4069 return DAG.UnrollVectorOp(Op.getNode()); 4070 } 4071 4072 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 4073 "Invalid type for custom lowering!"); 4074 if (VT != MVT::v4f32) 4075 return DAG.UnrollVectorOp(Op.getNode()); 4076 4077 unsigned CastOpc; 4078 unsigned Opc; 4079 switch (Op.getOpcode()) { 4080 default: llvm_unreachable("Invalid opcode!"); 4081 case ISD::SINT_TO_FP: 4082 CastOpc = ISD::SIGN_EXTEND; 4083 Opc = ISD::SINT_TO_FP; 4084 break; 4085 case ISD::UINT_TO_FP: 4086 CastOpc = ISD::ZERO_EXTEND; 4087 Opc = ISD::UINT_TO_FP; 4088 break; 4089 } 4090 4091 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 4092 return DAG.getNode(Opc, dl, VT, Op); 4093 } 4094 4095 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { 4096 EVT VT = Op.getValueType(); 4097 if (VT.isVector()) 4098 return LowerVectorINT_TO_FP(Op, DAG); 4099 if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) { 4100 RTLIB::Libcall LC; 4101 if (Op.getOpcode() == ISD::SINT_TO_FP) 4102 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), 4103 Op.getValueType()); 4104 else 4105 LC = 
RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), 4106 Op.getValueType()); 4107 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), 4108 /*isSigned*/ false, SDLoc(Op)).first; 4109 } 4110 4111 return Op; 4112 } 4113 4114 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 4115 // Implement fcopysign with a fabs and a conditional fneg. 4116 SDValue Tmp0 = Op.getOperand(0); 4117 SDValue Tmp1 = Op.getOperand(1); 4118 SDLoc dl(Op); 4119 EVT VT = Op.getValueType(); 4120 EVT SrcVT = Tmp1.getValueType(); 4121 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 4122 Tmp0.getOpcode() == ARMISD::VMOVDRR; 4123 bool UseNEON = !InGPR && Subtarget->hasNEON(); 4124 4125 if (UseNEON) { 4126 // Use VBSL to copy the sign bit. 4127 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 4128 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 4129 DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); 4130 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 4131 if (VT == MVT::f64) 4132 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 4133 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 4134 DAG.getConstant(32, dl, MVT::i32)); 4135 else /*if (VT == MVT::f32)*/ 4136 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 4137 if (SrcVT == MVT::f32) { 4138 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 4139 if (VT == MVT::f64) 4140 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 4141 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 4142 DAG.getConstant(32, dl, MVT::i32)); 4143 } else if (VT == MVT::f32) 4144 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 4145 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 4146 DAG.getConstant(32, dl, MVT::i32)); 4147 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 4148 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 4149 4150 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 4151 dl, MVT::i32); 4152 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 4153 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 4154 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 4155 4156 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 4157 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 4158 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 4159 if (VT == MVT::f32) { 4160 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 4161 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 4162 DAG.getConstant(0, dl, MVT::i32)); 4163 } else { 4164 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 4165 } 4166 4167 return Res; 4168 } 4169 4170 // Bitcast operand 1 to i32. 4171 if (SrcVT == MVT::f64) 4172 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 4173 Tmp1).getValue(1); 4174 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 4175 4176 // Or in the signbit with integer operations. 4177 SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); 4178 SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); 4179 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 4180 if (VT == MVT::f32) { 4181 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 4182 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 4183 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 4184 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 4185 } 4186 4187 // f64: Or the high part with signbit and then combine two parts. 
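  // i.e. result.hi = (Tmp0.hi & 0x7fffffff) | signbit(Tmp1), with the low
  // word of Tmp0 passed through unchanged.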
4188 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 4189 Tmp0); 4190 SDValue Lo = Tmp0.getValue(0); 4191 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 4192 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 4193 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 4194 } 4195 4196 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 4197 MachineFunction &MF = DAG.getMachineFunction(); 4198 MachineFrameInfo *MFI = MF.getFrameInfo(); 4199 MFI->setReturnAddressIsTaken(true); 4200 4201 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 4202 return SDValue(); 4203 4204 EVT VT = Op.getValueType(); 4205 SDLoc dl(Op); 4206 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4207 if (Depth) { 4208 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 4209 SDValue Offset = DAG.getConstant(4, dl, MVT::i32); 4210 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 4211 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 4212 MachinePointerInfo(), false, false, false, 0); 4213 } 4214 4215 // Return LR, which contains the return address. Mark it an implicit live-in. 4216 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 4217 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 4218 } 4219 4220 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 4221 const ARMBaseRegisterInfo &ARI = 4222 *static_cast<const ARMBaseRegisterInfo*>(RegInfo); 4223 MachineFunction &MF = DAG.getMachineFunction(); 4224 MachineFrameInfo *MFI = MF.getFrameInfo(); 4225 MFI->setFrameAddressIsTaken(true); 4226 4227 EVT VT = Op.getValueType(); 4228 SDLoc dl(Op); // FIXME probably not meaningful 4229 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4230 unsigned FrameReg = ARI.getFrameRegister(MF); 4231 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 4232 while (Depth--) 4233 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 4234 MachinePointerInfo(), 4235 false, false, false, 0); 4236 return FrameAddr; 4237 } 4238 4239 // FIXME? Maybe this could be a TableGen attribute on some registers and 4240 // this table could be generated automatically from RegInfo. 4241 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT, 4242 SelectionDAG &DAG) const { 4243 unsigned Reg = StringSwitch<unsigned>(RegName) 4244 .Case("sp", ARM::SP) 4245 .Default(0); 4246 if (Reg) 4247 return Reg; 4248 report_fatal_error(Twine("Invalid register name \"" 4249 + StringRef(RegName) + "\".")); 4250 } 4251 4252 // Result is 64 bit value so split into two 32 bit values and return as a 4253 // pair of values. 4254 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, 4255 SelectionDAG &DAG) { 4256 SDLoc DL(N); 4257 4258 // This function is only supposed to be called for i64 type destination. 4259 assert(N->getValueType(0) == MVT::i64 4260 && "ExpandREAD_REGISTER called for non-i64 type result."); 4261 4262 SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL, 4263 DAG.getVTList(MVT::i32, MVT::i32, MVT::Other), 4264 N->getOperand(0), 4265 N->getOperand(1)); 4266 4267 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0), 4268 Read.getValue(1))); 4269 Results.push_back(Read.getOperand(0)); 4270 } 4271 4272 /// \p BC is a bitcast that is about to be turned into a VMOVDRR. 
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \return The node that would replace \p BC, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {
  SDValue Op = BC->getOperand(0);
  EVT DstVT = BC->getValueType(0);

  // The only vector instruction that can produce a scalar (remember,
  // since the bitcast was about to be turned into VMOVDRR, the source
  // type is i64) from a vector is EXTRACT_VECTOR_ELT.
  // Moreover, we can do this combine only if there is one use.
  // Finally, if the destination type is not a vector, there is not
  // much point in forcing everything on the vector bank.
  if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !Op.hasOneUse())
    return SDValue();

  // If the index is not constant, we will introduce an additional
  // multiply that will stick.
  // Give up in that case.
  ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!Index)
    return SDValue();
  unsigned DstNumElt = DstVT.getVectorNumElements();

  // Compute the new index.
  const APInt &APIntIndex = Index->getAPIntValue();
  APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
  NewIndex *= APIntIndex;
  // Check if the new constant index fits into i32.
  if (NewIndex.getBitWidth() > 32)
    return SDValue();

  // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
  // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
  SDLoc dl(Op);
  SDValue ExtractSrc = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(
      *DAG.getContext(), DstVT.getScalarType(),
      ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
  SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
                     DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i64 types, either as the
  // source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
         "ExpandBITCAST called for non-i64 type");

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
    // if we can combine the bitcast with its source.
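    // Otherwise the i64 is simply split into its two 32-bit halves with
    // EXTRACT_ELEMENT and rebuilt on the FP register side, roughly
    // (i64 x) -> f64 VMOVDRR(x.lo, x.hi).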
4343 if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) 4344 return Val; 4345 4346 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 4347 DAG.getConstant(0, dl, MVT::i32)); 4348 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 4349 DAG.getConstant(1, dl, MVT::i32)); 4350 return DAG.getNode(ISD::BITCAST, dl, DstVT, 4351 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 4352 } 4353 4354 // Turn f64->i64 into VMOVRRD. 4355 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 4356 SDValue Cvt; 4357 if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && 4358 SrcVT.getVectorNumElements() > 1) 4359 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 4360 DAG.getVTList(MVT::i32, MVT::i32), 4361 DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); 4362 else 4363 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 4364 DAG.getVTList(MVT::i32, MVT::i32), Op); 4365 // Merge the pieces into a single i64 value. 4366 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 4367 } 4368 4369 return SDValue(); 4370 } 4371 4372 /// getZeroVector - Returns a vector of specified type with all zero elements. 4373 /// Zero vectors are used to represent vector negation and in those cases 4374 /// will be implemented with the NEON VNEG instruction. However, VNEG does 4375 /// not support i64 elements, so sometimes the zero vectors will need to be 4376 /// explicitly constructed. Regardless, use a canonical VMOV to create the 4377 /// zero vector. 4378 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) { 4379 assert(VT.isVector() && "Expected a vector type"); 4380 // The canonical modified immediate encoding of a zero vector is....0! 4381 SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); 4382 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 4383 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 4384 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4385 } 4386 4387 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two 4388 /// i32 values and take a 2 x i32 value to shift plus a shift amount. 4389 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 4390 SelectionDAG &DAG) const { 4391 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4392 EVT VT = Op.getValueType(); 4393 unsigned VTBits = VT.getSizeInBits(); 4394 SDLoc dl(Op); 4395 SDValue ShOpLo = Op.getOperand(0); 4396 SDValue ShOpHi = Op.getOperand(1); 4397 SDValue ShAmt = Op.getOperand(2); 4398 SDValue ARMcc; 4399 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? 
    ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
  // so that the shift + and get folded into a bitfield extract.
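  // Illustrative only (the exact selection depends on the subtarget), the
  // intended sequence is along the lines of:
  //   vmrs r0, fpscr
  //   add  r0, r0, #(1 << 22)
  //   ubfx r0, r0, #22, #2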
4463 SDLoc dl(Op); 4464 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 4465 DAG.getConstant(Intrinsic::arm_get_fpscr, dl, 4466 MVT::i32)); 4467 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 4468 DAG.getConstant(1U << 22, dl, MVT::i32)); 4469 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 4470 DAG.getConstant(22, dl, MVT::i32)); 4471 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 4472 DAG.getConstant(3, dl, MVT::i32)); 4473 } 4474 4475 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 4476 const ARMSubtarget *ST) { 4477 SDLoc dl(N); 4478 EVT VT = N->getValueType(0); 4479 if (VT.isVector()) { 4480 assert(ST->hasNEON()); 4481 4482 // Compute the least significant set bit: LSB = X & -X 4483 SDValue X = N->getOperand(0); 4484 SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X); 4485 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX); 4486 4487 EVT ElemTy = VT.getVectorElementType(); 4488 4489 if (ElemTy == MVT::i8) { 4490 // Compute with: cttz(x) = ctpop(lsb - 1) 4491 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 4492 DAG.getTargetConstant(1, dl, ElemTy)); 4493 SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); 4494 return DAG.getNode(ISD::CTPOP, dl, VT, Bits); 4495 } 4496 4497 if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && 4498 (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { 4499 // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 4500 unsigned NumBits = ElemTy.getSizeInBits(); 4501 SDValue WidthMinus1 = 4502 DAG.getNode(ARMISD::VMOVIMM, dl, VT, 4503 DAG.getTargetConstant(NumBits - 1, dl, ElemTy)); 4504 SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB); 4505 return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ); 4506 } 4507 4508 // Compute with: cttz(x) = ctpop(lsb - 1) 4509 4510 // Since we can only compute the number of bits in a byte with vcnt.8, we 4511 // have to gather the result with pairwise addition (vpaddl) for i16, i32, 4512 // and i64. 4513 4514 // Compute LSB - 1. 4515 SDValue Bits; 4516 if (ElemTy == MVT::i64) { 4517 // Load constant 0xffff'ffff'ffff'ffff to register. 4518 SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 4519 DAG.getTargetConstant(0x1eff, dl, MVT::i32)); 4520 Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); 4521 } else { 4522 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 4523 DAG.getTargetConstant(1, dl, ElemTy)); 4524 Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); 4525 } 4526 4527 // Count #bits with vcnt.8. 4528 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; 4529 SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits); 4530 SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8); 4531 4532 // Gather the #bits with vpaddl (pairwise add.) 4533 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; 4534 SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit, 4535 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), 4536 Cnt8); 4537 if (ElemTy == MVT::i16) 4538 return Cnt16; 4539 4540 EVT VT32Bit = VT.is64BitVector() ? 
MVT::v2i32 : MVT::v4i32; 4541 SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit, 4542 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), 4543 Cnt16); 4544 if (ElemTy == MVT::i32) 4545 return Cnt32; 4546 4547 assert(ElemTy == MVT::i64); 4548 SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 4549 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), 4550 Cnt32); 4551 return Cnt64; 4552 } 4553 4554 if (!ST->hasV6T2Ops()) 4555 return SDValue(); 4556 4557 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); 4558 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 4559 } 4560 4561 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count 4562 /// for each 16-bit element from operand, repeated. The basic idea is to 4563 /// leverage vcnt to get the 8-bit counts, gather and add the results. 4564 /// 4565 /// Trace for v4i16: 4566 /// input = [v0 v1 v2 v3 ] (vi 16-bit element) 4567 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element) 4568 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi) 4569 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6] 4570 /// [b0 b1 b2 b3 b4 b5 b6 b7] 4571 /// +[b1 b0 b3 b2 b5 b4 b7 b6] 4572 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0, 4573 /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits) 4574 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) { 4575 EVT VT = N->getValueType(0); 4576 SDLoc DL(N); 4577 4578 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; 4579 SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0)); 4580 SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0); 4581 SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1); 4582 SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2); 4583 return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3); 4584 } 4585 4586 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the 4587 /// bit-count for each 16-bit element from the operand. We need slightly 4588 /// different sequencing for v4i16 and v8i16 to stay within NEON's available 4589 /// 64/128-bit registers. 4590 /// 4591 /// Trace for v4i16: 4592 /// input = [v0 v1 v2 v3 ] (vi 16-bit element) 4593 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi) 4594 /// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ] 4595 /// v4i16:Extracted = [k0 k1 k2 k3 ] 4596 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { 4597 EVT VT = N->getValueType(0); 4598 SDLoc DL(N); 4599 4600 SDValue BitCounts = getCTPOP16BitCounts(N, DAG); 4601 if (VT.is64BitVector()) { 4602 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts); 4603 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended, 4604 DAG.getIntPtrConstant(0, DL)); 4605 } else { 4606 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, 4607 BitCounts, DAG.getIntPtrConstant(0, DL)); 4608 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted); 4609 } 4610 } 4611 4612 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the 4613 /// bit-count for each 32-bit element from the operand. The idea here is 4614 /// to split the vector into 16-bit elements, leverage the 16-bit count 4615 /// routine, and then combine the results. 
4616 /// 4617 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged): 4618 /// input = [v0 v1 ] (vi: 32-bit elements) 4619 /// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1]) 4620 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi) 4621 /// vrev: N0 = [k1 k0 k3 k2 ] 4622 /// [k0 k1 k2 k3 ] 4623 /// N1 =+[k1 k0 k3 k2 ] 4624 /// [k0 k2 k1 k3 ] 4625 /// N2 =+[k1 k3 k0 k2 ] 4626 /// [k0 k2 k1 k3 ] 4627 /// Extended =+[k1 k3 k0 k2 ] 4628 /// [k0 k2 ] 4629 /// Extracted=+[k1 k3 ] 4630 /// 4631 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) { 4632 EVT VT = N->getValueType(0); 4633 SDLoc DL(N); 4634 4635 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; 4636 4637 SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0)); 4638 SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG); 4639 SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16); 4640 SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0); 4641 SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1); 4642 4643 if (VT.is64BitVector()) { 4644 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2); 4645 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended, 4646 DAG.getIntPtrConstant(0, DL)); 4647 } else { 4648 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2, 4649 DAG.getIntPtrConstant(0, DL)); 4650 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted); 4651 } 4652 } 4653 4654 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, 4655 const ARMSubtarget *ST) { 4656 EVT VT = N->getValueType(0); 4657 4658 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); 4659 assert((VT == MVT::v2i32 || VT == MVT::v4i32 || 4660 VT == MVT::v4i16 || VT == MVT::v8i16) && 4661 "Unexpected type for custom ctpop lowering"); 4662 4663 if (VT.getVectorElementType() == MVT::i32) 4664 return lowerCTPOP32BitElements(N, DAG); 4665 else 4666 return lowerCTPOP16BitElements(N, DAG); 4667 } 4668 4669 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 4670 const ARMSubtarget *ST) { 4671 EVT VT = N->getValueType(0); 4672 SDLoc dl(N); 4673 4674 if (!VT.isVector()) 4675 return SDValue(); 4676 4677 // Lower vector shifts on NEON to use VSHL. 4678 assert(ST->hasNEON() && "unexpected vector shift"); 4679 4680 // Left shifts translate directly to the vshiftu intrinsic. 4681 if (N->getOpcode() == ISD::SHL) 4682 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 4683 DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl, 4684 MVT::i32), 4685 N->getOperand(0), N->getOperand(1)); 4686 4687 assert((N->getOpcode() == ISD::SRA || 4688 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 4689 4690 // NEON uses the same intrinsics for both left and right shifts. For 4691 // right shifts, the shift amounts are negative, so negate the vector of 4692 // shift amounts. 4693 EVT ShiftVT = N->getOperand(1).getValueType(); 4694 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 4695 getZeroVector(ShiftVT, DAG, dl), 4696 N->getOperand(1)); 4697 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 
4698 Intrinsic::arm_neon_vshifts : 4699 Intrinsic::arm_neon_vshiftu); 4700 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 4701 DAG.getConstant(vshiftInt, dl, MVT::i32), 4702 N->getOperand(0), NegatedCount); 4703 } 4704 4705 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 4706 const ARMSubtarget *ST) { 4707 EVT VT = N->getValueType(0); 4708 SDLoc dl(N); 4709 4710 // We can get here for a node like i32 = ISD::SHL i32, i64 4711 if (VT != MVT::i64) 4712 return SDValue(); 4713 4714 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 4715 "Unknown shift to lower!"); 4716 4717 // We only lower SRA, SRL of 1 here, all others use generic lowering. 4718 if (!isOneConstant(N->getOperand(1))) 4719 return SDValue(); 4720 4721 // If we are in thumb mode, we don't have RRX. 4722 if (ST->isThumb1Only()) return SDValue(); 4723 4724 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 4725 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 4726 DAG.getConstant(0, dl, MVT::i32)); 4727 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 4728 DAG.getConstant(1, dl, MVT::i32)); 4729 4730 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 4731 // captures the result into a carry flag. 4732 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 4733 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); 4734 4735 // The low part is an ARMISD::RRX operand, which shifts the carry in. 4736 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 4737 4738 // Merge the pieces into a single i64 value. 4739 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 4740 } 4741 4742 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 4743 SDValue TmpOp0, TmpOp1; 4744 bool Invert = false; 4745 bool Swap = false; 4746 unsigned Opc = 0; 4747 4748 SDValue Op0 = Op.getOperand(0); 4749 SDValue Op1 = Op.getOperand(1); 4750 SDValue CC = Op.getOperand(2); 4751 EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); 4752 EVT VT = Op.getValueType(); 4753 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4754 SDLoc dl(Op); 4755 4756 if (CmpVT.getVectorElementType() == MVT::i64) 4757 // 64-bit comparisons are not legal. We've marked SETCC as non-Custom, 4758 // but it's possible that our operands are 64-bit but our result is 32-bit. 4759 // Bail in this case. 4760 return SDValue(); 4761 4762 if (Op1.getValueType().isFloatingPoint()) { 4763 switch (SetCCOpcode) { 4764 default: llvm_unreachable("Illegal FP comparison"); 4765 case ISD::SETUNE: 4766 case ISD::SETNE: Invert = true; // Fallthrough 4767 case ISD::SETOEQ: 4768 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 4769 case ISD::SETOLT: 4770 case ISD::SETLT: Swap = true; // Fallthrough 4771 case ISD::SETOGT: 4772 case ISD::SETGT: Opc = ARMISD::VCGT; break; 4773 case ISD::SETOLE: 4774 case ISD::SETLE: Swap = true; // Fallthrough 4775 case ISD::SETOGE: 4776 case ISD::SETGE: Opc = ARMISD::VCGE; break; 4777 case ISD::SETUGE: Swap = true; // Fallthrough 4778 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 4779 case ISD::SETUGT: Swap = true; // Fallthrough 4780 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 4781 case ISD::SETUEQ: Invert = true; // Fallthrough 4782 case ISD::SETONE: 4783 // Expand this to (OLT | OGT). 
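    // (For SETUEQ, the Invert flag set above turns the final result into
    // ~(OLT | OGT), i.e. unordered-or-equal.)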
4784 TmpOp0 = Op0; 4785 TmpOp1 = Op1; 4786 Opc = ISD::OR; 4787 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); 4788 Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1); 4789 break; 4790 case ISD::SETUO: Invert = true; // Fallthrough 4791 case ISD::SETO: 4792 // Expand this to (OLT | OGE). 4793 TmpOp0 = Op0; 4794 TmpOp1 = Op1; 4795 Opc = ISD::OR; 4796 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); 4797 Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1); 4798 break; 4799 } 4800 } else { 4801 // Integer comparisons. 4802 switch (SetCCOpcode) { 4803 default: llvm_unreachable("Illegal integer comparison"); 4804 case ISD::SETNE: Invert = true; 4805 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 4806 case ISD::SETLT: Swap = true; 4807 case ISD::SETGT: Opc = ARMISD::VCGT; break; 4808 case ISD::SETLE: Swap = true; 4809 case ISD::SETGE: Opc = ARMISD::VCGE; break; 4810 case ISD::SETULT: Swap = true; 4811 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 4812 case ISD::SETULE: Swap = true; 4813 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 4814 } 4815 4816 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 4817 if (Opc == ARMISD::VCEQ) { 4818 4819 SDValue AndOp; 4820 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 4821 AndOp = Op0; 4822 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 4823 AndOp = Op1; 4824 4825 // Ignore bitconvert. 4826 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 4827 AndOp = AndOp.getOperand(0); 4828 4829 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 4830 Opc = ARMISD::VTST; 4831 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); 4832 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); 4833 Invert = !Invert; 4834 } 4835 } 4836 } 4837 4838 if (Swap) 4839 std::swap(Op0, Op1); 4840 4841 // If one of the operands is a constant vector zero, attempt to fold the 4842 // comparison to a specialized compare-against-zero form. 
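  // For example, VCEQ/VCGE/VCGT against zero become the single-operand
  // VCEQZ/VCGEZ/VCGTZ forms, and when zero is the left-hand operand VCGE and
  // VCGT flip to VCLEZ and VCLTZ respectively.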
4843 SDValue SingleOp; 4844 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 4845 SingleOp = Op0; 4846 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 4847 if (Opc == ARMISD::VCGE) 4848 Opc = ARMISD::VCLEZ; 4849 else if (Opc == ARMISD::VCGT) 4850 Opc = ARMISD::VCLTZ; 4851 SingleOp = Op1; 4852 } 4853 4854 SDValue Result; 4855 if (SingleOp.getNode()) { 4856 switch (Opc) { 4857 case ARMISD::VCEQ: 4858 Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break; 4859 case ARMISD::VCGE: 4860 Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break; 4861 case ARMISD::VCLEZ: 4862 Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break; 4863 case ARMISD::VCGT: 4864 Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break; 4865 case ARMISD::VCLTZ: 4866 Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break; 4867 default: 4868 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); 4869 } 4870 } else { 4871 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); 4872 } 4873 4874 Result = DAG.getSExtOrTrunc(Result, dl, VT); 4875 4876 if (Invert) 4877 Result = DAG.getNOT(dl, Result, VT); 4878 4879 return Result; 4880 } 4881 4882 static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) { 4883 SDValue LHS = Op.getOperand(0); 4884 SDValue RHS = Op.getOperand(1); 4885 SDValue Carry = Op.getOperand(2); 4886 SDValue Cond = Op.getOperand(3); 4887 SDLoc DL(Op); 4888 4889 assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only."); 4890 4891 assert(Carry.getOpcode() != ISD::CARRY_FALSE); 4892 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 4893 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); 4894 4895 SDValue FVal = DAG.getConstant(0, DL, MVT::i32); 4896 SDValue TVal = DAG.getConstant(1, DL, MVT::i32); 4897 SDValue ARMcc = DAG.getConstant( 4898 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); 4899 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4900 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, 4901 Cmp.getValue(1), SDValue()); 4902 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, 4903 CCR, Chain.getValue(1)); 4904 } 4905 4906 /// isNEONModifiedImm - Check if the specified splat value corresponds to a 4907 /// valid vector constant for a NEON instruction with a "modified immediate" 4908 /// operand (e.g., VMOV). If so, return the encoded value. 4909 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 4910 unsigned SplatBitSize, SelectionDAG &DAG, 4911 SDLoc dl, EVT &VT, bool is128Bits, 4912 NEONModImmType type) { 4913 unsigned OpCmode, Imm; 4914 4915 // SplatBitSize is set to the smallest size that splats the vector, so a 4916 // zero vector will always have SplatBitSize == 8. However, NEON modified 4917 // immediate instructions others than VMOV do not support the 8-bit encoding 4918 // of a zero vector, and the default encoding of zero is supposed to be the 4919 // 32-bit version. 4920 if (SplatBits == 0) 4921 SplatBitSize = 32; 4922 4923 switch (SplatBitSize) { 4924 case 8: 4925 if (type != VMOVModImm) 4926 return SDValue(); 4927 // Any 1-byte value is OK. Op=0, Cmode=1110. 4928 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 4929 OpCmode = 0xe; 4930 Imm = SplatBits; 4931 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 4932 break; 4933 4934 case 16: 4935 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 4936 VT = is128Bits ? 
MVT::v8i16 : MVT::v4i16; 4937 if ((SplatBits & ~0xff) == 0) { 4938 // Value = 0x00nn: Op=x, Cmode=100x. 4939 OpCmode = 0x8; 4940 Imm = SplatBits; 4941 break; 4942 } 4943 if ((SplatBits & ~0xff00) == 0) { 4944 // Value = 0xnn00: Op=x, Cmode=101x. 4945 OpCmode = 0xa; 4946 Imm = SplatBits >> 8; 4947 break; 4948 } 4949 return SDValue(); 4950 4951 case 32: 4952 // NEON's 32-bit VMOV supports splat values where: 4953 // * only one byte is nonzero, or 4954 // * the least significant byte is 0xff and the second byte is nonzero, or 4955 // * the least significant 2 bytes are 0xff and the third is nonzero. 4956 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 4957 if ((SplatBits & ~0xff) == 0) { 4958 // Value = 0x000000nn: Op=x, Cmode=000x. 4959 OpCmode = 0; 4960 Imm = SplatBits; 4961 break; 4962 } 4963 if ((SplatBits & ~0xff00) == 0) { 4964 // Value = 0x0000nn00: Op=x, Cmode=001x. 4965 OpCmode = 0x2; 4966 Imm = SplatBits >> 8; 4967 break; 4968 } 4969 if ((SplatBits & ~0xff0000) == 0) { 4970 // Value = 0x00nn0000: Op=x, Cmode=010x. 4971 OpCmode = 0x4; 4972 Imm = SplatBits >> 16; 4973 break; 4974 } 4975 if ((SplatBits & ~0xff000000) == 0) { 4976 // Value = 0xnn000000: Op=x, Cmode=011x. 4977 OpCmode = 0x6; 4978 Imm = SplatBits >> 24; 4979 break; 4980 } 4981 4982 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 4983 if (type == OtherModImm) return SDValue(); 4984 4985 if ((SplatBits & ~0xffff) == 0 && 4986 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 4987 // Value = 0x0000nnff: Op=x, Cmode=1100. 4988 OpCmode = 0xc; 4989 Imm = SplatBits >> 8; 4990 break; 4991 } 4992 4993 if ((SplatBits & ~0xffffff) == 0 && 4994 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 4995 // Value = 0x00nnffff: Op=x, Cmode=1101. 4996 OpCmode = 0xd; 4997 Imm = SplatBits >> 16; 4998 break; 4999 } 5000 5001 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 5002 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 5003 // VMOV.I32. A (very) minor optimization would be to replicate the value 5004 // and fall through here to test for a valid 64-bit splat. But, then the 5005 // caller would also need to check and handle the change in size. 5006 return SDValue(); 5007 5008 case 64: { 5009 if (type != VMOVModImm) 5010 return SDValue(); 5011 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 5012 uint64_t BitMask = 0xff; 5013 uint64_t Val = 0; 5014 unsigned ImmMask = 1; 5015 Imm = 0; 5016 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 5017 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 5018 Val |= BitMask; 5019 Imm |= ImmMask; 5020 } else if ((SplatBits & BitMask) != 0) { 5021 return SDValue(); 5022 } 5023 BitMask <<= 8; 5024 ImmMask <<= 1; 5025 } 5026 5027 if (DAG.getDataLayout().isBigEndian()) 5028 // swap higher and lower 32 bit word 5029 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4); 5030 5031 // Op=1, Cmode=1110. 5032 OpCmode = 0x1e; 5033 VT = is128Bits ? 
MVT::v2i64 : MVT::v1i64; 5034 break; 5035 } 5036 5037 default: 5038 llvm_unreachable("unexpected size for isNEONModifiedImm"); 5039 } 5040 5041 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 5042 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); 5043 } 5044 5045 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 5046 const ARMSubtarget *ST) const { 5047 if (!ST->hasVFP3()) 5048 return SDValue(); 5049 5050 bool IsDouble = Op.getValueType() == MVT::f64; 5051 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 5052 5053 // Use the default (constant pool) lowering for double constants when we have 5054 // an SP-only FPU 5055 if (IsDouble && Subtarget->isFPOnlySP()) 5056 return SDValue(); 5057 5058 // Try splatting with a VMOV.f32... 5059 APFloat FPVal = CFP->getValueAPF(); 5060 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 5061 5062 if (ImmVal != -1) { 5063 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 5064 // We have code in place to select a valid ConstantFP already, no need to 5065 // do any mangling. 5066 return Op; 5067 } 5068 5069 // It's a float and we are trying to use NEON operations where 5070 // possible. Lower it to a splat followed by an extract. 5071 SDLoc DL(Op); 5072 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); 5073 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 5074 NewVal); 5075 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 5076 DAG.getConstant(0, DL, MVT::i32)); 5077 } 5078 5079 // The rest of our options are NEON only, make sure that's allowed before 5080 // proceeding.. 5081 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 5082 return SDValue(); 5083 5084 EVT VMovVT; 5085 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 5086 5087 // It wouldn't really be worth bothering for doubles except for one very 5088 // important value, which does happen to match: 0.0. So make sure we don't do 5089 // anything stupid. 5090 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) 5091 return SDValue(); 5092 5093 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). 5094 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), 5095 VMovVT, false, VMOVModImm); 5096 if (NewVal != SDValue()) { 5097 SDLoc DL(Op); 5098 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 5099 NewVal); 5100 if (IsDouble) 5101 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5102 5103 // It's a float: cast and extract a vector element. 5104 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 5105 VecConstant); 5106 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 5107 DAG.getConstant(0, DL, MVT::i32)); 5108 } 5109 5110 // Finally, try a VMVN.i32 5111 NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, 5112 false, VMVNModImm); 5113 if (NewVal != SDValue()) { 5114 SDLoc DL(Op); 5115 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 5116 5117 if (IsDouble) 5118 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5119 5120 // It's a float: cast and extract a vector element. 
5121 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 5122 VecConstant); 5123 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 5124 DAG.getConstant(0, DL, MVT::i32)); 5125 } 5126 5127 return SDValue(); 5128 } 5129 5130 // check if an VEXT instruction can handle the shuffle mask when the 5131 // vector sources of the shuffle are the same. 5132 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 5133 unsigned NumElts = VT.getVectorNumElements(); 5134 5135 // Assume that the first shuffle index is not UNDEF. Fail if it is. 5136 if (M[0] < 0) 5137 return false; 5138 5139 Imm = M[0]; 5140 5141 // If this is a VEXT shuffle, the immediate value is the index of the first 5142 // element. The other shuffle indices must be the successive elements after 5143 // the first one. 5144 unsigned ExpectedElt = Imm; 5145 for (unsigned i = 1; i < NumElts; ++i) { 5146 // Increment the expected index. If it wraps around, just follow it 5147 // back to index zero and keep going. 5148 ++ExpectedElt; 5149 if (ExpectedElt == NumElts) 5150 ExpectedElt = 0; 5151 5152 if (M[i] < 0) continue; // ignore UNDEF indices 5153 if (ExpectedElt != static_cast<unsigned>(M[i])) 5154 return false; 5155 } 5156 5157 return true; 5158 } 5159 5160 5161 static bool isVEXTMask(ArrayRef<int> M, EVT VT, 5162 bool &ReverseVEXT, unsigned &Imm) { 5163 unsigned NumElts = VT.getVectorNumElements(); 5164 ReverseVEXT = false; 5165 5166 // Assume that the first shuffle index is not UNDEF. Fail if it is. 5167 if (M[0] < 0) 5168 return false; 5169 5170 Imm = M[0]; 5171 5172 // If this is a VEXT shuffle, the immediate value is the index of the first 5173 // element. The other shuffle indices must be the successive elements after 5174 // the first one. 5175 unsigned ExpectedElt = Imm; 5176 for (unsigned i = 1; i < NumElts; ++i) { 5177 // Increment the expected index. If it wraps around, it may still be 5178 // a VEXT but the source vectors must be swapped. 5179 ExpectedElt += 1; 5180 if (ExpectedElt == NumElts * 2) { 5181 ExpectedElt = 0; 5182 ReverseVEXT = true; 5183 } 5184 5185 if (M[i] < 0) continue; // ignore UNDEF indices 5186 if (ExpectedElt != static_cast<unsigned>(M[i])) 5187 return false; 5188 } 5189 5190 // Adjust the index value if the source operands will be swapped. 5191 if (ReverseVEXT) 5192 Imm -= NumElts; 5193 5194 return true; 5195 } 5196 5197 /// isVREVMask - Check if a vector shuffle corresponds to a VREV 5198 /// instruction with the specified blocksize. (The order of the elements 5199 /// within each block of the vector is reversed.) 5200 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 5201 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 5202 "Only possible block sizes for VREV are: 16, 32, 64"); 5203 5204 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5205 if (EltSz == 64) 5206 return false; 5207 5208 unsigned NumElts = VT.getVectorNumElements(); 5209 unsigned BlockElts = M[0] + 1; 5210 // If the first shuffle index is UNDEF, be optimistic. 5211 if (M[0] < 0) 5212 BlockElts = BlockSize / EltSz; 5213 5214 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 5215 return false; 5216 5217 for (unsigned i = 0; i < NumElts; ++i) { 5218 if (M[i] < 0) continue; // ignore UNDEF indices 5219 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 5220 return false; 5221 } 5222 5223 return true; 5224 } 5225 5226 static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 5227 // We can handle <8 x i8> vector shuffles. 
If the index in the mask is out of 5228 // range, then 0 is placed into the resulting vector. So pretty much any mask 5229 // of 8 elements can work here. 5230 return VT == MVT::v8i8 && M.size() == 8; 5231 } 5232 5233 // Checks whether the shuffle mask represents a vector transpose (VTRN) by 5234 // checking that pairs of elements in the shuffle mask represent the same index 5235 // in each vector, incrementing the expected index by 2 at each step. 5236 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] 5237 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} 5238 // v2={e,f,g,h} 5239 // WhichResult gives the offset for each element in the mask based on which 5240 // of the two results it belongs to. 5241 // 5242 // The transpose can be represented either as: 5243 // result1 = shufflevector v1, v2, result1_shuffle_mask 5244 // result2 = shufflevector v1, v2, result2_shuffle_mask 5245 // where v1/v2 and the shuffle masks have the same number of elements 5246 // (here WhichResult (see below) indicates which result is being checked) 5247 // 5248 // or as: 5249 // results = shufflevector v1, v2, shuffle_mask 5250 // where both results are returned in one vector and the shuffle mask has twice 5251 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we 5252 // want to check the low half and high half of the shuffle mask as if it were 5253 // the other case 5254 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5255 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5256 if (EltSz == 64) 5257 return false; 5258 5259 unsigned NumElts = VT.getVectorNumElements(); 5260 if (M.size() != NumElts && M.size() != NumElts*2) 5261 return false; 5262 5263 // If the mask is twice as long as the input vector then we need to check the 5264 // upper and lower parts of the mask with a matching value for WhichResult 5265 // FIXME: A mask with only even values will be rejected in case the first 5266 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only 5267 // M[0] is used to determine WhichResult 5268 for (unsigned i = 0; i < M.size(); i += NumElts) { 5269 if (M.size() == NumElts * 2) 5270 WhichResult = i / NumElts; 5271 else 5272 WhichResult = M[i] == 0 ? 0 : 1; 5273 for (unsigned j = 0; j < NumElts; j += 2) { 5274 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 5275 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) 5276 return false; 5277 } 5278 } 5279 5280 if (M.size() == NumElts*2) 5281 WhichResult = 0; 5282 5283 return true; 5284 } 5285 5286 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 5287 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5288 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 5289 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5290 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5291 if (EltSz == 64) 5292 return false; 5293 5294 unsigned NumElts = VT.getVectorNumElements(); 5295 if (M.size() != NumElts && M.size() != NumElts*2) 5296 return false; 5297 5298 for (unsigned i = 0; i < M.size(); i += NumElts) { 5299 if (M.size() == NumElts * 2) 5300 WhichResult = i / NumElts; 5301 else 5302 WhichResult = M[i] == 0 ? 
0 : 1; 5303 for (unsigned j = 0; j < NumElts; j += 2) { 5304 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 5305 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) 5306 return false; 5307 } 5308 } 5309 5310 if (M.size() == NumElts*2) 5311 WhichResult = 0; 5312 5313 return true; 5314 } 5315 5316 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking 5317 // that the mask elements are either all even and in steps of size 2 or all odd 5318 // and in steps of size 2. 5319 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] 5320 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} 5321 // v2={e,f,g,h} 5322 // Requires similar checks to that of isVTRNMask with 5323 // respect the how results are returned. 5324 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5325 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5326 if (EltSz == 64) 5327 return false; 5328 5329 unsigned NumElts = VT.getVectorNumElements(); 5330 if (M.size() != NumElts && M.size() != NumElts*2) 5331 return false; 5332 5333 for (unsigned i = 0; i < M.size(); i += NumElts) { 5334 WhichResult = M[i] == 0 ? 0 : 1; 5335 for (unsigned j = 0; j < NumElts; ++j) { 5336 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) 5337 return false; 5338 } 5339 } 5340 5341 if (M.size() == NumElts*2) 5342 WhichResult = 0; 5343 5344 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5345 if (VT.is64BitVector() && EltSz == 32) 5346 return false; 5347 5348 return true; 5349 } 5350 5351 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 5352 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5353 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 5354 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5355 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5356 if (EltSz == 64) 5357 return false; 5358 5359 unsigned NumElts = VT.getVectorNumElements(); 5360 if (M.size() != NumElts && M.size() != NumElts*2) 5361 return false; 5362 5363 unsigned Half = NumElts / 2; 5364 for (unsigned i = 0; i < M.size(); i += NumElts) { 5365 WhichResult = M[i] == 0 ? 0 : 1; 5366 for (unsigned j = 0; j < NumElts; j += Half) { 5367 unsigned Idx = WhichResult; 5368 for (unsigned k = 0; k < Half; ++k) { 5369 int MIdx = M[i + j + k]; 5370 if (MIdx >= 0 && (unsigned) MIdx != Idx) 5371 return false; 5372 Idx += 2; 5373 } 5374 } 5375 } 5376 5377 if (M.size() == NumElts*2) 5378 WhichResult = 0; 5379 5380 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5381 if (VT.is64BitVector() && EltSz == 32) 5382 return false; 5383 5384 return true; 5385 } 5386 5387 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking 5388 // that pairs of elements of the shufflemask represent the same index in each 5389 // vector incrementing sequentially through the vectors. 5390 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] 5391 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} 5392 // v2={e,f,g,h} 5393 // Requires similar checks to that of isVTRNMask with respect the how results 5394 // are returned. 
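// (WhichResult selects between the two interleaved results; for the v4i32
// example above the second result would correspond to the mask [2, 6, 3, 7].)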
5395 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5396 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5397 if (EltSz == 64) 5398 return false; 5399 5400 unsigned NumElts = VT.getVectorNumElements(); 5401 if (M.size() != NumElts && M.size() != NumElts*2) 5402 return false; 5403 5404 for (unsigned i = 0; i < M.size(); i += NumElts) { 5405 WhichResult = M[i] == 0 ? 0 : 1; 5406 unsigned Idx = WhichResult * NumElts / 2; 5407 for (unsigned j = 0; j < NumElts; j += 2) { 5408 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 5409 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) 5410 return false; 5411 Idx += 1; 5412 } 5413 } 5414 5415 if (M.size() == NumElts*2) 5416 WhichResult = 0; 5417 5418 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5419 if (VT.is64BitVector() && EltSz == 32) 5420 return false; 5421 5422 return true; 5423 } 5424 5425 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 5426 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5427 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 5428 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5429 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5430 if (EltSz == 64) 5431 return false; 5432 5433 unsigned NumElts = VT.getVectorNumElements(); 5434 if (M.size() != NumElts && M.size() != NumElts*2) 5435 return false; 5436 5437 for (unsigned i = 0; i < M.size(); i += NumElts) { 5438 WhichResult = M[i] == 0 ? 0 : 1; 5439 unsigned Idx = WhichResult * NumElts / 2; 5440 for (unsigned j = 0; j < NumElts; j += 2) { 5441 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 5442 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) 5443 return false; 5444 Idx += 1; 5445 } 5446 } 5447 5448 if (M.size() == NumElts*2) 5449 WhichResult = 0; 5450 5451 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5452 if (VT.is64BitVector() && EltSz == 32) 5453 return false; 5454 5455 return true; 5456 } 5457 5458 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), 5459 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. 5460 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, 5461 unsigned &WhichResult, 5462 bool &isV_UNDEF) { 5463 isV_UNDEF = false; 5464 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 5465 return ARMISD::VTRN; 5466 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 5467 return ARMISD::VUZP; 5468 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 5469 return ARMISD::VZIP; 5470 5471 isV_UNDEF = true; 5472 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5473 return ARMISD::VTRN; 5474 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5475 return ARMISD::VUZP; 5476 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5477 return ARMISD::VZIP; 5478 5479 return 0; 5480 } 5481 5482 /// \return true if this is a reverse operation on an vector. 5483 static bool isReverseMask(ArrayRef<int> M, EVT VT) { 5484 unsigned NumElts = VT.getVectorNumElements(); 5485 // Make sure the mask has the right size. 5486 if (NumElts != M.size()) 5487 return false; 5488 5489 // Look for <15, ..., 3, -1, 1, 0>. 5490 for (unsigned i = 0; i != NumElts; ++i) 5491 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) 5492 return false; 5493 5494 return true; 5495 } 5496 5497 // If N is an integer constant that can be moved into a register in one 5498 // instruction, return an SDValue of such a constant (will become a MOV 5499 // instruction). 
Otherwise return null. 5500 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 5501 const ARMSubtarget *ST, SDLoc dl) { 5502 uint64_t Val; 5503 if (!isa<ConstantSDNode>(N)) 5504 return SDValue(); 5505 Val = cast<ConstantSDNode>(N)->getZExtValue(); 5506 5507 if (ST->isThumb1Only()) { 5508 if (Val <= 255 || ~Val <= 255) 5509 return DAG.getConstant(Val, dl, MVT::i32); 5510 } else { 5511 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 5512 return DAG.getConstant(Val, dl, MVT::i32); 5513 } 5514 return SDValue(); 5515 } 5516 5517 // If this is a case we can't handle, return null and let the default 5518 // expansion code take care of it. 5519 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 5520 const ARMSubtarget *ST) const { 5521 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 5522 SDLoc dl(Op); 5523 EVT VT = Op.getValueType(); 5524 5525 APInt SplatBits, SplatUndef; 5526 unsigned SplatBitSize; 5527 bool HasAnyUndefs; 5528 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 5529 if (SplatBitSize <= 64) { 5530 // Check if an immediate VMOV works. 5531 EVT VmovVT; 5532 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 5533 SplatUndef.getZExtValue(), SplatBitSize, 5534 DAG, dl, VmovVT, VT.is128BitVector(), 5535 VMOVModImm); 5536 if (Val.getNode()) { 5537 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 5538 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 5539 } 5540 5541 // Try an immediate VMVN. 5542 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 5543 Val = isNEONModifiedImm(NegatedImm, 5544 SplatUndef.getZExtValue(), SplatBitSize, 5545 DAG, dl, VmovVT, VT.is128BitVector(), 5546 VMVNModImm); 5547 if (Val.getNode()) { 5548 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 5549 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 5550 } 5551 5552 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 5553 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 5554 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 5555 if (ImmVal != -1) { 5556 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); 5557 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 5558 } 5559 } 5560 } 5561 } 5562 5563 // Scan through the operands to see if only one value is used. 5564 // 5565 // As an optimisation, even if more than one value is used it may be more 5566 // profitable to splat with one value then change some lanes. 5567 // 5568 // Heuristically we decide to do this if the vector has a "dominant" value, 5569 // defined as splatted to more than half of the lanes. 5570 unsigned NumElts = VT.getVectorNumElements(); 5571 bool isOnlyLowElement = true; 5572 bool usesOnlyOneValue = true; 5573 bool hasDominantValue = false; 5574 bool isConstant = true; 5575 5576 // Map of the number of times a particular SDValue appears in the 5577 // element list. 5578 DenseMap<SDValue, unsigned> ValueCounts; 5579 SDValue Value; 5580 for (unsigned i = 0; i < NumElts; ++i) { 5581 SDValue V = Op.getOperand(i); 5582 if (V.isUndef()) 5583 continue; 5584 if (i > 0) 5585 isOnlyLowElement = false; 5586 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 5587 isConstant = false; 5588 5589 ValueCounts.insert(std::make_pair(V, 0)); 5590 unsigned &Count = ValueCounts[V]; 5591 5592 // Is this value dominant? 
(takes up more than half of the lanes) 5593 if (++Count > (NumElts / 2)) { 5594 hasDominantValue = true; 5595 Value = V; 5596 } 5597 } 5598 if (ValueCounts.size() != 1) 5599 usesOnlyOneValue = false; 5600 if (!Value.getNode() && ValueCounts.size() > 0) 5601 Value = ValueCounts.begin()->first; 5602 5603 if (ValueCounts.size() == 0) 5604 return DAG.getUNDEF(VT); 5605 5606 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. 5607 // Keep going if we are hitting this case. 5608 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 5609 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 5610 5611 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5612 5613 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 5614 // i32 and try again. 5615 if (hasDominantValue && EltSize <= 32) { 5616 if (!isConstant) { 5617 SDValue N; 5618 5619 // If we are VDUPing a value that comes directly from a vector, that will 5620 // cause an unnecessary move to and from a GPR, where instead we could 5621 // just use VDUPLANE. We can only do this if the lane being extracted 5622 // is at a constant index, as the VDUP from lane instructions only have 5623 // constant-index forms. 5624 ConstantSDNode *constIndex; 5625 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 5626 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { 5627 // We need to create a new undef vector to use for the VDUPLANE if the 5628 // size of the vector from which we get the value is different than the 5629 // size of the vector that we need to create. We will insert the element 5630 // such that the register coalescer will remove unnecessary copies. 5631 if (VT != Value->getOperand(0).getValueType()) { 5632 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 5633 VT.getVectorNumElements(); 5634 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 5635 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 5636 Value, DAG.getConstant(index, dl, MVT::i32)), 5637 DAG.getConstant(index, dl, MVT::i32)); 5638 } else 5639 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 5640 Value->getOperand(0), Value->getOperand(1)); 5641 } else 5642 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 5643 5644 if (!usesOnlyOneValue) { 5645 // The dominant value was splatted as 'N', but we now have to insert 5646 // all differing elements. 
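        // (Each differing lane becomes an INSERT_VECTOR_ELT on top of the
        // splat, so this typically selects to a VDUP followed by a few
        // per-lane moves.)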
5647 for (unsigned I = 0; I < NumElts; ++I) { 5648 if (Op.getOperand(I) == Value) 5649 continue; 5650 SmallVector<SDValue, 3> Ops; 5651 Ops.push_back(N); 5652 Ops.push_back(Op.getOperand(I)); 5653 Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); 5654 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); 5655 } 5656 } 5657 return N; 5658 } 5659 if (VT.getVectorElementType().isFloatingPoint()) { 5660 SmallVector<SDValue, 8> Ops; 5661 for (unsigned i = 0; i < NumElts; ++i) 5662 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 5663 Op.getOperand(i))); 5664 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 5665 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); 5666 Val = LowerBUILD_VECTOR(Val, DAG, ST); 5667 if (Val.getNode()) 5668 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 5669 } 5670 if (usesOnlyOneValue) { 5671 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 5672 if (isConstant && Val.getNode()) 5673 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 5674 } 5675 } 5676 5677 // If all elements are constants and the case above didn't get hit, fall back 5678 // to the default expansion, which will generate a load from the constant 5679 // pool. 5680 if (isConstant) 5681 return SDValue(); 5682 5683 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 5684 if (NumElts >= 4) { 5685 SDValue shuffle = ReconstructShuffle(Op, DAG); 5686 if (shuffle != SDValue()) 5687 return shuffle; 5688 } 5689 5690 // Vectors with 32- or 64-bit elements can be built by directly assigning 5691 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 5692 // will be legalized. 5693 if (EltSize >= 32) { 5694 // Do the expansion with floating-point types, since that is what the VFP 5695 // registers are defined to use, and since i64 is not legal. 5696 EVT EltVT = EVT::getFloatingPointVT(EltSize); 5697 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 5698 SmallVector<SDValue, 8> Ops; 5699 for (unsigned i = 0; i < NumElts; ++i) 5700 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 5701 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 5702 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 5703 } 5704 5705 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 5706 // know the default expansion would otherwise fall back on something even 5707 // worse. For a vector with one or two non-undef values, that's 5708 // scalar_to_vector for the elements followed by a shuffle (provided the 5709 // shuffle is valid for the target) and materialization element by element 5710 // on the stack followed by a load for everything else. 5711 if (!isConstant && !usesOnlyOneValue) { 5712 SDValue Vec = DAG.getUNDEF(VT); 5713 for (unsigned i = 0 ; i < NumElts; ++i) { 5714 SDValue V = Op.getOperand(i); 5715 if (V.isUndef()) 5716 continue; 5717 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); 5718 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); 5719 } 5720 return Vec; 5721 } 5722 5723 return SDValue(); 5724 } 5725 5726 // Gather data to see if the operation can be modelled as a 5727 // shuffle in combination with VEXTs. 
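// For example, a BUILD_VECTOR whose operands are EXTRACT_VECTOR_ELTs taken
// from at most two source vectors can often be rewritten as a single
// VECTOR_SHUFFLE of those sources, after padding, extracting or VEXTing them
// into a compatible width.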
5728 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 5729 SelectionDAG &DAG) const { 5730 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 5731 SDLoc dl(Op); 5732 EVT VT = Op.getValueType(); 5733 unsigned NumElts = VT.getVectorNumElements(); 5734 5735 struct ShuffleSourceInfo { 5736 SDValue Vec; 5737 unsigned MinElt; 5738 unsigned MaxElt; 5739 5740 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to 5741 // be compatible with the shuffle we intend to construct. As a result 5742 // ShuffleVec will be some sliding window into the original Vec. 5743 SDValue ShuffleVec; 5744 5745 // Code should guarantee that element i in Vec starts at element "WindowBase 5746 // + i * WindowScale in ShuffleVec". 5747 int WindowBase; 5748 int WindowScale; 5749 5750 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } 5751 ShuffleSourceInfo(SDValue Vec) 5752 : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0), 5753 WindowScale(1) {} 5754 }; 5755 5756 // First gather all vectors used as an immediate source for this BUILD_VECTOR 5757 // node. 5758 SmallVector<ShuffleSourceInfo, 2> Sources; 5759 for (unsigned i = 0; i < NumElts; ++i) { 5760 SDValue V = Op.getOperand(i); 5761 if (V.isUndef()) 5762 continue; 5763 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 5764 // A shuffle can only come from building a vector from various 5765 // elements of other vectors. 5766 return SDValue(); 5767 } else if (!isa<ConstantSDNode>(V.getOperand(1))) { 5768 // Furthermore, shuffles require a constant mask, whereas extractelts 5769 // accept variable indices. 5770 return SDValue(); 5771 } 5772 5773 // Add this element source to the list if it's not already there. 5774 SDValue SourceVec = V.getOperand(0); 5775 auto Source = std::find(Sources.begin(), Sources.end(), SourceVec); 5776 if (Source == Sources.end()) 5777 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); 5778 5779 // Update the minimum and maximum lane number seen. 5780 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 5781 Source->MinElt = std::min(Source->MinElt, EltNo); 5782 Source->MaxElt = std::max(Source->MaxElt, EltNo); 5783 } 5784 5785 // Currently only do something sane when at most two source vectors 5786 // are involved. 5787 if (Sources.size() > 2) 5788 return SDValue(); 5789 5790 // Find out the smallest element size among result and two sources, and use 5791 // it as element size to build the shuffle_vector. 5792 EVT SmallestEltTy = VT.getVectorElementType(); 5793 for (auto &Source : Sources) { 5794 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); 5795 if (SrcEltTy.bitsLT(SmallestEltTy)) 5796 SmallestEltTy = SrcEltTy; 5797 } 5798 unsigned ResMultiplier = 5799 VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits(); 5800 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); 5801 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); 5802 5803 // If the source vector is too wide or too narrow, we may nevertheless be able 5804 // to construct a compatible shuffle either by concatenating it with UNDEF or 5805 // extracting a suitable range of elements. 5806 for (auto &Src : Sources) { 5807 EVT SrcVT = Src.ShuffleVec.getValueType(); 5808 5809 if (SrcVT.getSizeInBits() == VT.getSizeInBits()) 5810 continue; 5811 5812 // This stage of the search produces a source with the same element type as 5813 // the original, but with a total width matching the BUILD_VECTOR output. 
5814 EVT EltVT = SrcVT.getVectorElementType(); 5815 unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits(); 5816 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); 5817 5818 if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { 5819 if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits()) 5820 return SDValue(); 5821 // We can pad out the smaller vector for free, so if it's part of a 5822 // shuffle... 5823 Src.ShuffleVec = 5824 DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, 5825 DAG.getUNDEF(Src.ShuffleVec.getValueType())); 5826 continue; 5827 } 5828 5829 if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits()) 5830 return SDValue(); 5831 5832 if (Src.MaxElt - Src.MinElt >= NumSrcElts) { 5833 // Span too large for a VEXT to cope 5834 return SDValue(); 5835 } 5836 5837 if (Src.MinElt >= NumSrcElts) { 5838 // The extraction can just take the second half 5839 Src.ShuffleVec = 5840 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5841 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 5842 Src.WindowBase = -NumSrcElts; 5843 } else if (Src.MaxElt < NumSrcElts) { 5844 // The extraction can just take the first half 5845 Src.ShuffleVec = 5846 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5847 DAG.getConstant(0, dl, MVT::i32)); 5848 } else { 5849 // An actual VEXT is needed 5850 SDValue VEXTSrc1 = 5851 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5852 DAG.getConstant(0, dl, MVT::i32)); 5853 SDValue VEXTSrc2 = 5854 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5855 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 5856 5857 Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, 5858 VEXTSrc2, 5859 DAG.getConstant(Src.MinElt, dl, MVT::i32)); 5860 Src.WindowBase = -Src.MinElt; 5861 } 5862 } 5863 5864 // Another possible incompatibility occurs from the vector element types. We 5865 // can fix this by bitcasting the source vectors to the same type we intend 5866 // for the shuffle. 5867 for (auto &Src : Sources) { 5868 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); 5869 if (SrcEltTy == SmallestEltTy) 5870 continue; 5871 assert(ShuffleVT.getVectorElementType() == SmallestEltTy); 5872 Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); 5873 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); 5874 Src.WindowBase *= Src.WindowScale; 5875 } 5876 5877 // Final sanity check before we try to actually produce a shuffle. 5878 DEBUG( 5879 for (auto Src : Sources) 5880 assert(Src.ShuffleVec.getValueType() == ShuffleVT); 5881 ); 5882 5883 // The stars all align, our next step is to produce the mask for the shuffle. 5884 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); 5885 int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits(); 5886 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 5887 SDValue Entry = Op.getOperand(i); 5888 if (Entry.isUndef()) 5889 continue; 5890 5891 auto Src = std::find(Sources.begin(), Sources.end(), Entry.getOperand(0)); 5892 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); 5893 5894 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit 5895 // trunc. So only std::min(SrcBits, DestBits) actually get defined in this 5896 // segment. 
5897 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); 5898 int BitsDefined = std::min(OrigEltTy.getSizeInBits(), 5899 VT.getVectorElementType().getSizeInBits()); 5900 int LanesDefined = BitsDefined / BitsPerShuffleLane; 5901 5902 // This source is expected to fill ResMultiplier lanes of the final shuffle, 5903 // starting at the appropriate offset. 5904 int *LaneMask = &Mask[i * ResMultiplier]; 5905 5906 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; 5907 ExtractBase += NumElts * (Src - Sources.begin()); 5908 for (int j = 0; j < LanesDefined; ++j) 5909 LaneMask[j] = ExtractBase + j; 5910 } 5911 5912 // Final check before we try to produce nonsense... 5913 if (!isShuffleMaskLegal(Mask, ShuffleVT)) 5914 return SDValue(); 5915 5916 // We can't handle more than two sources. This should have already 5917 // been checked before this point. 5918 assert(Sources.size() <= 2 && "Too many sources!"); 5919 5920 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; 5921 for (unsigned i = 0; i < Sources.size(); ++i) 5922 ShuffleOps[i] = Sources[i].ShuffleVec; 5923 5924 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], 5925 ShuffleOps[1], &Mask[0]); 5926 return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 5927 } 5928 5929 /// isShuffleMaskLegal - Targets can use this to indicate that they only 5930 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5931 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5932 /// are assumed to be legal. 5933 bool 5934 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 5935 EVT VT) const { 5936 if (VT.getVectorNumElements() == 4 && 5937 (VT.is128BitVector() || VT.is64BitVector())) { 5938 unsigned PFIndexes[4]; 5939 for (unsigned i = 0; i != 4; ++i) { 5940 if (M[i] < 0) 5941 PFIndexes[i] = 8; 5942 else 5943 PFIndexes[i] = M[i]; 5944 } 5945 5946 // Compute the index in the perfect shuffle table. 5947 unsigned PFTableIndex = 5948 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5949 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5950 unsigned Cost = (PFEntry >> 30); 5951 5952 if (Cost <= 4) 5953 return true; 5954 } 5955 5956 bool ReverseVEXT, isV_UNDEF; 5957 unsigned Imm, WhichResult; 5958 5959 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5960 return (EltSize >= 32 || 5961 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 5962 isVREVMask(M, VT, 64) || 5963 isVREVMask(M, VT, 32) || 5964 isVREVMask(M, VT, 16) || 5965 isVEXTMask(M, VT, ReverseVEXT, Imm) || 5966 isVTBLMask(M, VT) || 5967 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) || 5968 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); 5969 } 5970 5971 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5972 /// the specified operations to build the shuffle. 
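/// The encoding assumed here (it matches the field extraction below): bits
/// 31-30 of each table entry hold the cost, bits 29-26 the operation, and the
/// two 13-bit fields the LHS and RHS entries to recurse on. As a worked
/// example of the indexing used in isShuffleMaskLegal above, a 4-lane mask
/// such as <0, u, 2, 3> maps the undef lane to 8 and gives
/// PFTableIndex = 0*9*9*9 + 8*9*9 + 2*9 + 3 = 669.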
5973 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5974 SDValue RHS, SelectionDAG &DAG, 5975 SDLoc dl) { 5976 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5977 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5978 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5979 5980 enum { 5981 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5982 OP_VREV, 5983 OP_VDUP0, 5984 OP_VDUP1, 5985 OP_VDUP2, 5986 OP_VDUP3, 5987 OP_VEXT1, 5988 OP_VEXT2, 5989 OP_VEXT3, 5990 OP_VUZPL, // VUZP, left result 5991 OP_VUZPR, // VUZP, right result 5992 OP_VZIPL, // VZIP, left result 5993 OP_VZIPR, // VZIP, right result 5994 OP_VTRNL, // VTRN, left result 5995 OP_VTRNR // VTRN, right result 5996 }; 5997 5998 if (OpNum == OP_COPY) { 5999 if (LHSID == (1*9+2)*9+3) return LHS; 6000 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 6001 return RHS; 6002 } 6003 6004 SDValue OpLHS, OpRHS; 6005 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 6006 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 6007 EVT VT = OpLHS.getValueType(); 6008 6009 switch (OpNum) { 6010 default: llvm_unreachable("Unknown shuffle opcode!"); 6011 case OP_VREV: 6012 // VREV divides the vector in half and swaps within the half. 6013 if (VT.getVectorElementType() == MVT::i32 || 6014 VT.getVectorElementType() == MVT::f32) 6015 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 6016 // vrev <4 x i16> -> VREV32 6017 if (VT.getVectorElementType() == MVT::i16) 6018 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 6019 // vrev <4 x i8> -> VREV16 6020 assert(VT.getVectorElementType() == MVT::i8); 6021 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 6022 case OP_VDUP0: 6023 case OP_VDUP1: 6024 case OP_VDUP2: 6025 case OP_VDUP3: 6026 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6027 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); 6028 case OP_VEXT1: 6029 case OP_VEXT2: 6030 case OP_VEXT3: 6031 return DAG.getNode(ARMISD::VEXT, dl, VT, 6032 OpLHS, OpRHS, 6033 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); 6034 case OP_VUZPL: 6035 case OP_VUZPR: 6036 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 6037 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 6038 case OP_VZIPL: 6039 case OP_VZIPR: 6040 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 6041 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 6042 case OP_VTRNL: 6043 case OP_VTRNR: 6044 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 6045 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 6046 } 6047 } 6048 6049 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 6050 ArrayRef<int> ShuffleMask, 6051 SelectionDAG &DAG) { 6052 // Check to see if we can use the VTBL instruction. 
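// Rough model of what VTBL buys us (assuming the usual NEON semantics): the
// shuffle mask is materialized as a v8i8 index vector, and vtbl1/vtbl2 select
// one table byte per index, with out-of-range indices producing zero, so any
// 8-byte shuffle mask can be encoded directly as that index vector.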
6053 SDValue V1 = Op.getOperand(0);
6054 SDValue V2 = Op.getOperand(1);
6055 SDLoc DL(Op);
6056 
6057 SmallVector<SDValue, 8> VTBLMask;
6058 for (ArrayRef<int>::iterator
6059 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
6060 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
6061 
6062 if (V2.getNode()->isUndef())
6063 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
6064 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6065 
6066 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
6067 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6068 }
6069 
6070 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
6071 SelectionDAG &DAG) {
6072 SDLoc DL(Op);
6073 SDValue OpLHS = Op.getOperand(0);
6074 EVT VT = OpLHS.getValueType();
6075 
6076 assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
6077 "Expect an v8i16/v16i8 type");
6078 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
6079 // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
6080 // extract the first 8 bytes into the top double word and the last 8 bytes
6081 // into the bottom double word. The v8i16 case is similar.
6082 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
6083 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
6084 DAG.getConstant(ExtractNum, DL, MVT::i32));
6085 }
6086 
6087 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
6088 SDValue V1 = Op.getOperand(0);
6089 SDValue V2 = Op.getOperand(1);
6090 SDLoc dl(Op);
6091 EVT VT = Op.getValueType();
6092 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
6093 
6094 // Convert shuffles that are directly supported on NEON to target-specific
6095 // DAG nodes, instead of keeping them as shuffles and matching them again
6096 // during code selection. This is more efficient and avoids the possibility
6097 // of inconsistencies between legalization and selection.
6098 // FIXME: floating-point vectors should be canonicalized to integer vectors
6099 // of the same type so that they get CSEd properly.
6100 ArrayRef<int> ShuffleMask = SVN->getMask();
6101 
6102 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
6103 if (EltSize <= 32) {
6104 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
6105 int Lane = SVN->getSplatIndex();
6106 // If this is undef splat, generate it via "just" vdup, if possible.
6107 if (Lane == -1) Lane = 0;
6108 
6109 // Test if V1 is a SCALAR_TO_VECTOR.
6110 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
6111 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6112 }
6113 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
6114 // (and probably will turn into a SCALAR_TO_VECTOR once legalization
6115 // reaches it).
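// (Illustrative case: if the splat picks lane 0 and V1 is
// BUILD_VECTOR(%x, undef, undef, undef), the whole shuffle is really a
// broadcast of the scalar %x, so VDUP of %x is emitted directly instead of
// VDUPLANE on a half-built vector.)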
6116 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 6117 !isa<ConstantSDNode>(V1.getOperand(0))) { 6118 bool IsScalarToVector = true; 6119 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 6120 if (!V1.getOperand(i).isUndef()) { 6121 IsScalarToVector = false; 6122 break; 6123 } 6124 if (IsScalarToVector) 6125 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 6126 } 6127 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 6128 DAG.getConstant(Lane, dl, MVT::i32)); 6129 } 6130 6131 bool ReverseVEXT; 6132 unsigned Imm; 6133 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 6134 if (ReverseVEXT) 6135 std::swap(V1, V2); 6136 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 6137 DAG.getConstant(Imm, dl, MVT::i32)); 6138 } 6139 6140 if (isVREVMask(ShuffleMask, VT, 64)) 6141 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 6142 if (isVREVMask(ShuffleMask, VT, 32)) 6143 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 6144 if (isVREVMask(ShuffleMask, VT, 16)) 6145 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 6146 6147 if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 6148 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 6149 DAG.getConstant(Imm, dl, MVT::i32)); 6150 } 6151 6152 // Check for Neon shuffles that modify both input vectors in place. 6153 // If both results are used, i.e., if there are two shuffles with the same 6154 // source operands and with masks corresponding to both results of one of 6155 // these operations, DAG memoization will ensure that a single node is 6156 // used for both shuffles. 6157 unsigned WhichResult; 6158 bool isV_UNDEF; 6159 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 6160 ShuffleMask, VT, WhichResult, isV_UNDEF)) { 6161 if (isV_UNDEF) 6162 V2 = V1; 6163 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) 6164 .getValue(WhichResult); 6165 } 6166 6167 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize 6168 // shuffles that produce a result larger than their operands with: 6169 // shuffle(concat(v1, undef), concat(v2, undef)) 6170 // -> 6171 // shuffle(concat(v1, v2), undef) 6172 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). 6173 // 6174 // This is useful in the general case, but there are special cases where 6175 // native shuffles produce larger results: the two-result ops. 6176 // 6177 // Look through the concat when lowering them: 6178 // shuffle(concat(v1, v2), undef) 6179 // -> 6180 // concat(VZIP(v1, v2):0, :1) 6181 // 6182 if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { 6183 SDValue SubV1 = V1->getOperand(0); 6184 SDValue SubV2 = V1->getOperand(1); 6185 EVT SubVT = SubV1.getValueType(); 6186 6187 // We expect these to have been canonicalized to -1. 
6188 assert(std::all_of(ShuffleMask.begin(), ShuffleMask.end(), [&](int i) { 6189 return i < (int)VT.getVectorNumElements(); 6190 }) && "Unexpected shuffle index into UNDEF operand!"); 6191 6192 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 6193 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { 6194 if (isV_UNDEF) 6195 SubV2 = SubV1; 6196 assert((WhichResult == 0) && 6197 "In-place shuffle of concat can only have one result!"); 6198 SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), 6199 SubV1, SubV2); 6200 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), 6201 Res.getValue(1)); 6202 } 6203 } 6204 } 6205 6206 // If the shuffle is not directly supported and it has 4 elements, use 6207 // the PerfectShuffle-generated table to synthesize it from other shuffles. 6208 unsigned NumElts = VT.getVectorNumElements(); 6209 if (NumElts == 4) { 6210 unsigned PFIndexes[4]; 6211 for (unsigned i = 0; i != 4; ++i) { 6212 if (ShuffleMask[i] < 0) 6213 PFIndexes[i] = 8; 6214 else 6215 PFIndexes[i] = ShuffleMask[i]; 6216 } 6217 6218 // Compute the index in the perfect shuffle table. 6219 unsigned PFTableIndex = 6220 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 6221 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 6222 unsigned Cost = (PFEntry >> 30); 6223 6224 if (Cost <= 4) 6225 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 6226 } 6227 6228 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 6229 if (EltSize >= 32) { 6230 // Do the expansion with floating-point types, since that is what the VFP 6231 // registers are defined to use, and since i64 is not legal. 6232 EVT EltVT = EVT::getFloatingPointVT(EltSize); 6233 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 6234 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 6235 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 6236 SmallVector<SDValue, 8> Ops; 6237 for (unsigned i = 0; i < NumElts; ++i) { 6238 if (ShuffleMask[i] < 0) 6239 Ops.push_back(DAG.getUNDEF(EltVT)); 6240 else 6241 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6242 ShuffleMask[i] < (int)NumElts ? V1 : V2, 6243 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 6244 dl, MVT::i32))); 6245 } 6246 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 6247 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6248 } 6249 6250 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) 6251 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); 6252 6253 if (VT == MVT::v8i8) 6254 if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) 6255 return NewOp; 6256 6257 return SDValue(); 6258 } 6259 6260 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 6261 // INSERT_VECTOR_ELT is legal only for immediate indexes. 6262 SDValue Lane = Op.getOperand(2); 6263 if (!isa<ConstantSDNode>(Lane)) 6264 return SDValue(); 6265 6266 return Op; 6267 } 6268 6269 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 6270 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
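// (Sketch of the sub-word case handled below, with made-up IR: extracting
// lane 3 of a v8i8 into an i32 result becomes ARMISD::VGETLANEu, roughly
// "vmov.u8 r0, d0[3]", so the zero-extension is folded into the move.)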
6271 SDValue Lane = Op.getOperand(1); 6272 if (!isa<ConstantSDNode>(Lane)) 6273 return SDValue(); 6274 6275 SDValue Vec = Op.getOperand(0); 6276 if (Op.getValueType() == MVT::i32 && 6277 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 6278 SDLoc dl(Op); 6279 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 6280 } 6281 6282 return Op; 6283 } 6284 6285 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 6286 // The only time a CONCAT_VECTORS operation can have legal types is when 6287 // two 64-bit vectors are concatenated to a 128-bit vector. 6288 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 6289 "unexpected CONCAT_VECTORS"); 6290 SDLoc dl(Op); 6291 SDValue Val = DAG.getUNDEF(MVT::v2f64); 6292 SDValue Op0 = Op.getOperand(0); 6293 SDValue Op1 = Op.getOperand(1); 6294 if (!Op0.isUndef()) 6295 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 6296 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 6297 DAG.getIntPtrConstant(0, dl)); 6298 if (!Op1.isUndef()) 6299 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 6300 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 6301 DAG.getIntPtrConstant(1, dl)); 6302 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 6303 } 6304 6305 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 6306 /// element has been zero/sign-extended, depending on the isSigned parameter, 6307 /// from an integer type half its size. 6308 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 6309 bool isSigned) { 6310 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 6311 EVT VT = N->getValueType(0); 6312 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 6313 SDNode *BVN = N->getOperand(0).getNode(); 6314 if (BVN->getValueType(0) != MVT::v4i32 || 6315 BVN->getOpcode() != ISD::BUILD_VECTOR) 6316 return false; 6317 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; 6318 unsigned HiElt = 1 - LoElt; 6319 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 6320 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 6321 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 6322 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 6323 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 6324 return false; 6325 if (isSigned) { 6326 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 6327 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 6328 return true; 6329 } else { 6330 if (Hi0->isNullValue() && Hi1->isNullValue()) 6331 return true; 6332 } 6333 return false; 6334 } 6335 6336 if (N->getOpcode() != ISD::BUILD_VECTOR) 6337 return false; 6338 6339 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 6340 SDNode *Elt = N->getOperand(i).getNode(); 6341 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 6342 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 6343 unsigned HalfSize = EltSize / 2; 6344 if (isSigned) { 6345 if (!isIntN(HalfSize, C->getSExtValue())) 6346 return false; 6347 } else { 6348 if (!isUIntN(HalfSize, C->getZExtValue())) 6349 return false; 6350 } 6351 continue; 6352 } 6353 return false; 6354 } 6355 6356 return true; 6357 } 6358 6359 /// isSignExtended - Check if a node is a vector value that is sign-extended 6360 /// or a constant BUILD_VECTOR with sign-extended elements. 
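/// (For instance, a constant v4i16 BUILD_VECTOR such as <-1, 2, -3, 4> has
/// every element representable in i8, so for VMULL formation it is treated as
/// if it had been sign-extended from a v4i8 value.)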
6361 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 6362 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 6363 return true; 6364 if (isExtendedBUILD_VECTOR(N, DAG, true)) 6365 return true; 6366 return false; 6367 } 6368 6369 /// isZeroExtended - Check if a node is a vector value that is zero-extended 6370 /// or a constant BUILD_VECTOR with zero-extended elements. 6371 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 6372 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 6373 return true; 6374 if (isExtendedBUILD_VECTOR(N, DAG, false)) 6375 return true; 6376 return false; 6377 } 6378 6379 static EVT getExtensionTo64Bits(const EVT &OrigVT) { 6380 if (OrigVT.getSizeInBits() >= 64) 6381 return OrigVT; 6382 6383 assert(OrigVT.isSimple() && "Expecting a simple value type"); 6384 6385 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; 6386 switch (OrigSimpleTy) { 6387 default: llvm_unreachable("Unexpected Vector Type"); 6388 case MVT::v2i8: 6389 case MVT::v2i16: 6390 return MVT::v2i32; 6391 case MVT::v4i8: 6392 return MVT::v4i16; 6393 } 6394 } 6395 6396 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total 6397 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. 6398 /// We insert the required extension here to get the vector to fill a D register. 6399 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, 6400 const EVT &OrigTy, 6401 const EVT &ExtTy, 6402 unsigned ExtOpcode) { 6403 // The vector originally had a size of OrigTy. It was then extended to ExtTy. 6404 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than 6405 // 64-bits we need to insert a new extension so that it will be 64-bits. 6406 assert(ExtTy.is128BitVector() && "Unexpected extension size"); 6407 if (OrigTy.getSizeInBits() >= 64) 6408 return N; 6409 6410 // Must extend size to at least 64 bits to be used as an operand for VMULL. 6411 EVT NewVT = getExtensionTo64Bits(OrigTy); 6412 6413 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); 6414 } 6415 6416 /// SkipLoadExtensionForVMULL - return a load of the original vector size that 6417 /// does not do any sign/zero extension. If the original vector is less 6418 /// than 64 bits, an appropriate extension will be added after the load to 6419 /// reach a total size of 64 bits. We have to add the extension separately 6420 /// because ARM does not have a sign/zero extending load for vectors. 6421 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { 6422 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); 6423 6424 // The load already has the right type. 6425 if (ExtendedTy == LD->getMemoryVT()) 6426 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), 6427 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 6428 LD->isNonTemporal(), LD->isInvariant(), 6429 LD->getAlignment()); 6430 6431 // We need to create a zextload/sextload. We cannot just create a load 6432 // followed by a zext/zext node because LowerMUL is also run during normal 6433 // operation legalization where we can't create illegal types. 
6434 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
6435 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
6436 LD->getMemoryVT(), LD->isVolatile(), LD->isInvariant(),
6437 LD->isNonTemporal(), LD->getAlignment());
6438 }
6439 
6440 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
6441 /// extending load, or BUILD_VECTOR with extended elements, return the
6442 /// unextended value. The unextended vector should be 64 bits so that it can
6443 /// be used as an operand to a VMULL instruction. If the original vector size
6444 /// before extension is less than 64 bits we add an extension to resize
6445 /// the vector to 64 bits.
6446 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
6447 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
6448 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
6449 N->getOperand(0)->getValueType(0),
6450 N->getValueType(0),
6451 N->getOpcode());
6452 
6453 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
6454 return SkipLoadExtensionForVMULL(LD, DAG);
6455 
6456 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
6457 // have been legalized as a BITCAST from v4i32.
6458 if (N->getOpcode() == ISD::BITCAST) {
6459 SDNode *BVN = N->getOperand(0).getNode();
6460 assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
6461 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
6462 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
6463 return DAG.getBuildVector(
6464 MVT::v2i32, SDLoc(N),
6465 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
6466 }
6467 // Construct a new BUILD_VECTOR with elements truncated to half the size.
6468 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
6469 EVT VT = N->getValueType(0);
6470 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
6471 unsigned NumElts = VT.getVectorNumElements();
6472 MVT TruncVT = MVT::getIntegerVT(EltSize);
6473 SmallVector<SDValue, 8> Ops;
6474 SDLoc dl(N);
6475 for (unsigned i = 0; i != NumElts; ++i) {
6476 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
6477 const APInt &CInt = C->getAPIntValue();
6478 // Element types smaller than 32 bits are not legal, so use i32 elements.
6479 // The values are implicitly truncated so sext vs. zext doesn't matter.
6480 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
6481 }
6482 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
6483 }
6484 
6485 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
6486 unsigned Opcode = N->getOpcode();
6487 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
6488 SDNode *N0 = N->getOperand(0).getNode();
6489 SDNode *N1 = N->getOperand(1).getNode();
6490 return N0->hasOneUse() && N1->hasOneUse() &&
6491 isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
6492 }
6493 return false;
6494 }
6495 
6496 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
6497 unsigned Opcode = N->getOpcode();
6498 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
6499 SDNode *N0 = N->getOperand(0).getNode();
6500 SDNode *N1 = N->getOperand(1).getNode();
6501 return N0->hasOneUse() && N1->hasOneUse() &&
6502 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
6503 }
6504 return false;
6505 }
6506 
6507 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
6508 // Multiplications are only custom-lowered for 128-bit vectors so that
6509 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
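// For example (illustrative IR, not taken from a test case):
//   %a = sext <8 x i8> %x to <8 x i16>
//   %b = sext <8 x i8> %y to <8 x i16>
//   %m = mul <8 x i16> %a, %b
// is recognized below as ARMISD::VMULLs on the unextended v8i8 operands and
// selected as a single vmull.s8.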
6510 EVT VT = Op.getValueType(); 6511 assert(VT.is128BitVector() && VT.isInteger() && 6512 "unexpected type for custom-lowering ISD::MUL"); 6513 SDNode *N0 = Op.getOperand(0).getNode(); 6514 SDNode *N1 = Op.getOperand(1).getNode(); 6515 unsigned NewOpc = 0; 6516 bool isMLA = false; 6517 bool isN0SExt = isSignExtended(N0, DAG); 6518 bool isN1SExt = isSignExtended(N1, DAG); 6519 if (isN0SExt && isN1SExt) 6520 NewOpc = ARMISD::VMULLs; 6521 else { 6522 bool isN0ZExt = isZeroExtended(N0, DAG); 6523 bool isN1ZExt = isZeroExtended(N1, DAG); 6524 if (isN0ZExt && isN1ZExt) 6525 NewOpc = ARMISD::VMULLu; 6526 else if (isN1SExt || isN1ZExt) { 6527 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 6528 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 6529 if (isN1SExt && isAddSubSExt(N0, DAG)) { 6530 NewOpc = ARMISD::VMULLs; 6531 isMLA = true; 6532 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 6533 NewOpc = ARMISD::VMULLu; 6534 isMLA = true; 6535 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 6536 std::swap(N0, N1); 6537 NewOpc = ARMISD::VMULLu; 6538 isMLA = true; 6539 } 6540 } 6541 6542 if (!NewOpc) { 6543 if (VT == MVT::v2i64) 6544 // Fall through to expand this. It is not legal. 6545 return SDValue(); 6546 else 6547 // Other vector multiplications are legal. 6548 return Op; 6549 } 6550 } 6551 6552 // Legalize to a VMULL instruction. 6553 SDLoc DL(Op); 6554 SDValue Op0; 6555 SDValue Op1 = SkipExtensionForVMULL(N1, DAG); 6556 if (!isMLA) { 6557 Op0 = SkipExtensionForVMULL(N0, DAG); 6558 assert(Op0.getValueType().is64BitVector() && 6559 Op1.getValueType().is64BitVector() && 6560 "unexpected types for extended operands to VMULL"); 6561 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 6562 } 6563 6564 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 6565 // isel lowering to take advantage of no-stall back to back vmul + vmla. 6566 // vmull q0, d4, d6 6567 // vmlal q0, d5, d6 6568 // is faster than 6569 // vaddl q0, d4, d5 6570 // vmovl q1, d6 6571 // vmul q0, q0, q1 6572 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); 6573 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); 6574 EVT Op1VT = Op1.getValueType(); 6575 return DAG.getNode(N0->getOpcode(), DL, VT, 6576 DAG.getNode(NewOpc, DL, VT, 6577 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 6578 DAG.getNode(NewOpc, DL, VT, 6579 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 6580 } 6581 6582 static SDValue 6583 LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) { 6584 // TODO: Should this propagate fast-math-flags? 6585 6586 // Convert to float 6587 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 6588 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 6589 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 6590 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 6591 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 6592 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 6593 // Get reciprocal estimate. 6594 // float4 recip = vrecpeq_f32(yf); 6595 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 6596 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 6597 Y); 6598 // Because char has a smaller range than uchar, we can actually get away 6599 // without any newton steps. This requires that we use a weird bias 6600 // of 0xb000, however (again, this has been exhaustively tested). 
6601 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 6602 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 6603 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 6604 Y = DAG.getConstant(0xb000, dl, MVT::v4i32); 6605 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 6606 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 6607 // Convert back to short. 6608 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 6609 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 6610 return X; 6611 } 6612 6613 static SDValue 6614 LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) { 6615 // TODO: Should this propagate fast-math-flags? 6616 6617 SDValue N2; 6618 // Convert to float. 6619 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 6620 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 6621 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 6622 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 6623 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 6624 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 6625 6626 // Use reciprocal estimate and one refinement step. 6627 // float4 recip = vrecpeq_f32(yf); 6628 // recip *= vrecpsq_f32(yf, recip); 6629 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 6630 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 6631 N1); 6632 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 6633 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 6634 N1, N2); 6635 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 6636 // Because short has a smaller range than ushort, we can actually get away 6637 // with only a single newton step. This requires that we use a weird bias 6638 // of 89, however (again, this has been exhaustively tested). 6639 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 6640 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 6641 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 6642 N1 = DAG.getConstant(0x89, dl, MVT::v4i32); 6643 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 6644 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 6645 // Convert back to integer and return. 
6646 // return vmovn_s32(vcvt_s32_f32(result)); 6647 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 6648 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 6649 return N0; 6650 } 6651 6652 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 6653 EVT VT = Op.getValueType(); 6654 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 6655 "unexpected type for custom-lowering ISD::SDIV"); 6656 6657 SDLoc dl(Op); 6658 SDValue N0 = Op.getOperand(0); 6659 SDValue N1 = Op.getOperand(1); 6660 SDValue N2, N3; 6661 6662 if (VT == MVT::v8i8) { 6663 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 6664 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 6665 6666 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 6667 DAG.getIntPtrConstant(4, dl)); 6668 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 6669 DAG.getIntPtrConstant(4, dl)); 6670 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 6671 DAG.getIntPtrConstant(0, dl)); 6672 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 6673 DAG.getIntPtrConstant(0, dl)); 6674 6675 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 6676 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 6677 6678 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 6679 N0 = LowerCONCAT_VECTORS(N0, DAG); 6680 6681 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 6682 return N0; 6683 } 6684 return LowerSDIV_v4i16(N0, N1, dl, DAG); 6685 } 6686 6687 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 6688 // TODO: Should this propagate fast-math-flags? 6689 EVT VT = Op.getValueType(); 6690 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 6691 "unexpected type for custom-lowering ISD::UDIV"); 6692 6693 SDLoc dl(Op); 6694 SDValue N0 = Op.getOperand(0); 6695 SDValue N1 = Op.getOperand(1); 6696 SDValue N2, N3; 6697 6698 if (VT == MVT::v8i8) { 6699 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 6700 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 6701 6702 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 6703 DAG.getIntPtrConstant(4, dl)); 6704 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 6705 DAG.getIntPtrConstant(4, dl)); 6706 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 6707 DAG.getIntPtrConstant(0, dl)); 6708 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 6709 DAG.getIntPtrConstant(0, dl)); 6710 6711 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 6712 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 6713 6714 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 6715 N0 = LowerCONCAT_VECTORS(N0, DAG); 6716 6717 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 6718 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, 6719 MVT::i32), 6720 N0); 6721 return N0; 6722 } 6723 6724 // v4i16 sdiv ... Convert to float. 6725 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 6726 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 6727 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 6728 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 6729 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 6730 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 6731 6732 // Use reciprocal estimate and two refinement steps. 
6733 // float4 recip = vrecpeq_f32(yf); 6734 // recip *= vrecpsq_f32(yf, recip); 6735 // recip *= vrecpsq_f32(yf, recip); 6736 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 6737 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 6738 BN1); 6739 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 6740 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 6741 BN1, N2); 6742 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 6743 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 6744 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 6745 BN1, N2); 6746 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 6747 // Simply multiplying by the reciprocal estimate can leave us a few ulps 6748 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 6749 // and that it will never cause us to return an answer too large). 6750 // float4 result = as_float4(as_int4(xf*recip) + 2); 6751 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 6752 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 6753 N1 = DAG.getConstant(2, dl, MVT::v4i32); 6754 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 6755 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 6756 // Convert back to integer and return. 6757 // return vmovn_u32(vcvt_s32_f32(result)); 6758 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 6759 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 6760 return N0; 6761 } 6762 6763 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 6764 EVT VT = Op.getNode()->getValueType(0); 6765 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 6766 6767 unsigned Opc; 6768 bool ExtraOp = false; 6769 switch (Op.getOpcode()) { 6770 default: llvm_unreachable("Invalid code"); 6771 case ISD::ADDC: Opc = ARMISD::ADDC; break; 6772 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 6773 case ISD::SUBC: Opc = ARMISD::SUBC; break; 6774 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 6775 } 6776 6777 if (!ExtraOp) 6778 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 6779 Op.getOperand(1)); 6780 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 6781 Op.getOperand(1), Op.getOperand(2)); 6782 } 6783 6784 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 6785 assert(Subtarget->isTargetDarwin()); 6786 6787 // For iOS, we want to call an alternative entry point: __sincos_stret, 6788 // return values are passed via sret. 6789 SDLoc dl(Op); 6790 SDValue Arg = Op.getOperand(0); 6791 EVT ArgVT = Arg.getValueType(); 6792 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 6793 auto PtrVT = getPointerTy(DAG.getDataLayout()); 6794 6795 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6796 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6797 6798 // Pair of floats / doubles used to pass the result. 6799 Type *RetTy = StructType::get(ArgTy, ArgTy, nullptr); 6800 auto &DL = DAG.getDataLayout(); 6801 6802 ArgListTy Args; 6803 bool ShouldUseSRet = Subtarget->isAPCS_ABI(); 6804 SDValue SRet; 6805 if (ShouldUseSRet) { 6806 // Create stack object for sret. 
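// (What gets built here, as a rough C-level sketch with illustrative names:
//    struct { float sinval, cosval; } r;   // the sret stack object below
//    __sincosf_stret(&r, x);               // hidden sret pointer goes first
//  Targets not using APCS skip the sret object and take the result directly
//  from the call instead.)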
6807 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); 6808 const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy); 6809 int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); 6810 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); 6811 6812 ArgListEntry Entry; 6813 Entry.Node = SRet; 6814 Entry.Ty = RetTy->getPointerTo(); 6815 Entry.isSExt = false; 6816 Entry.isZExt = false; 6817 Entry.isSRet = true; 6818 Args.push_back(Entry); 6819 RetTy = Type::getVoidTy(*DAG.getContext()); 6820 } 6821 6822 ArgListEntry Entry; 6823 Entry.Node = Arg; 6824 Entry.Ty = ArgTy; 6825 Entry.isSExt = false; 6826 Entry.isZExt = false; 6827 Args.push_back(Entry); 6828 6829 const char *LibcallName = 6830 (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret"; 6831 RTLIB::Libcall LC = 6832 (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32; 6833 CallingConv::ID CC = getLibcallCallingConv(LC); 6834 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); 6835 6836 TargetLowering::CallLoweringInfo CLI(DAG); 6837 CLI.setDebugLoc(dl) 6838 .setChain(DAG.getEntryNode()) 6839 .setCallee(CC, RetTy, Callee, std::move(Args), 0) 6840 .setDiscardResult(ShouldUseSRet); 6841 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 6842 6843 if (!ShouldUseSRet) 6844 return CallResult.first; 6845 6846 SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet, 6847 MachinePointerInfo(), false, false, false, 0); 6848 6849 // Address of cos field. 6850 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, 6851 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); 6852 SDValue LoadCos = DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, 6853 MachinePointerInfo(), false, false, false, 0); 6854 6855 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 6856 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, 6857 LoadSin.getValue(0), LoadCos.getValue(0)); 6858 } 6859 6860 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, 6861 bool Signed, 6862 SDValue &Chain) const { 6863 EVT VT = Op.getValueType(); 6864 assert((VT == MVT::i32 || VT == MVT::i64) && 6865 "unexpected type for custom lowering DIV"); 6866 SDLoc dl(Op); 6867 6868 const auto &DL = DAG.getDataLayout(); 6869 const auto &TLI = DAG.getTargetLoweringInfo(); 6870 6871 const char *Name = nullptr; 6872 if (Signed) 6873 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64"; 6874 else 6875 Name = (VT == MVT::i32) ? 
"__rt_udiv" : "__rt_udiv64"; 6876 6877 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); 6878 6879 ARMTargetLowering::ArgListTy Args; 6880 6881 for (auto AI : {1, 0}) { 6882 ArgListEntry Arg; 6883 Arg.Node = Op.getOperand(AI); 6884 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); 6885 Args.push_back(Arg); 6886 } 6887 6888 CallLoweringInfo CLI(DAG); 6889 CLI.setDebugLoc(dl) 6890 .setChain(Chain) 6891 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), 6892 ES, std::move(Args), 0); 6893 6894 return LowerCallTo(CLI).first; 6895 } 6896 6897 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, 6898 bool Signed) const { 6899 assert(Op.getValueType() == MVT::i32 && 6900 "unexpected type for custom lowering DIV"); 6901 SDLoc dl(Op); 6902 6903 SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, 6904 DAG.getEntryNode(), Op.getOperand(1)); 6905 6906 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); 6907 } 6908 6909 void ARMTargetLowering::ExpandDIV_Windows( 6910 SDValue Op, SelectionDAG &DAG, bool Signed, 6911 SmallVectorImpl<SDValue> &Results) const { 6912 const auto &DL = DAG.getDataLayout(); 6913 const auto &TLI = DAG.getTargetLoweringInfo(); 6914 6915 assert(Op.getValueType() == MVT::i64 && 6916 "unexpected type for custom lowering DIV"); 6917 SDLoc dl(Op); 6918 6919 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op.getOperand(1), 6920 DAG.getConstant(0, dl, MVT::i32)); 6921 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op.getOperand(1), 6922 DAG.getConstant(1, dl, MVT::i32)); 6923 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i32, Lo, Hi); 6924 6925 SDValue DBZCHK = 6926 DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, DAG.getEntryNode(), Or); 6927 6928 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); 6929 6930 SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); 6931 SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, 6932 DAG.getConstant(32, dl, TLI.getPointerTy(DL))); 6933 Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); 6934 6935 Results.push_back(Lower); 6936 Results.push_back(Upper); 6937 } 6938 6939 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 6940 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering())) 6941 // Acquire/Release load/store is not legal for targets without a dmb or 6942 // equivalent available. 6943 return SDValue(); 6944 6945 // Monotonic load/store is legal for all targets. 
6946 return Op; 6947 } 6948 6949 static void ReplaceREADCYCLECOUNTER(SDNode *N, 6950 SmallVectorImpl<SDValue> &Results, 6951 SelectionDAG &DAG, 6952 const ARMSubtarget *Subtarget) { 6953 SDLoc DL(N); 6954 // Under Power Management extensions, the cycle-count is: 6955 // mrc p15, #0, <Rt>, c9, c13, #0 6956 SDValue Ops[] = { N->getOperand(0), // Chain 6957 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), 6958 DAG.getConstant(15, DL, MVT::i32), 6959 DAG.getConstant(0, DL, MVT::i32), 6960 DAG.getConstant(9, DL, MVT::i32), 6961 DAG.getConstant(13, DL, MVT::i32), 6962 DAG.getConstant(0, DL, MVT::i32) 6963 }; 6964 6965 SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 6966 DAG.getVTList(MVT::i32, MVT::Other), Ops); 6967 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, 6968 DAG.getConstant(0, DL, MVT::i32))); 6969 Results.push_back(Cycles32.getValue(1)); 6970 } 6971 6972 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { 6973 SDLoc dl(V.getNode()); 6974 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); 6975 SDValue VHi = DAG.getAnyExtOrTrunc( 6976 DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), 6977 dl, MVT::i32); 6978 SDValue RegClass = 6979 DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); 6980 SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); 6981 SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); 6982 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; 6983 return SDValue( 6984 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); 6985 } 6986 6987 static void ReplaceCMP_SWAP_64Results(SDNode *N, 6988 SmallVectorImpl<SDValue> & Results, 6989 SelectionDAG &DAG) { 6990 assert(N->getValueType(0) == MVT::i64 && 6991 "AtomicCmpSwap on types less than 64 should be legal"); 6992 SDValue Ops[] = {N->getOperand(1), 6993 createGPRPairNode(DAG, N->getOperand(2)), 6994 createGPRPairNode(DAG, N->getOperand(3)), 6995 N->getOperand(0)}; 6996 SDNode *CmpSwap = DAG.getMachineNode( 6997 ARM::CMP_SWAP_64, SDLoc(N), 6998 DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); 6999 7000 MachineFunction &MF = DAG.getMachineFunction(); 7001 MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); 7002 MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); 7003 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1); 7004 7005 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32, 7006 SDValue(CmpSwap, 0))); 7007 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32, 7008 SDValue(CmpSwap, 0))); 7009 Results.push_back(SDValue(CmpSwap, 2)); 7010 } 7011 7012 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7013 switch (Op.getOpcode()) { 7014 default: llvm_unreachable("Don't know how to custom lower this!"); 7015 case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); 7016 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 7017 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 7018 case ISD::GlobalAddress: 7019 switch (Subtarget->getTargetTriple().getObjectFormat()) { 7020 default: llvm_unreachable("unknown object format"); 7021 case Triple::COFF: 7022 return LowerGlobalAddressWindows(Op, DAG); 7023 case Triple::ELF: 7024 return LowerGlobalAddressELF(Op, DAG); 7025 case Triple::MachO: 7026 return LowerGlobalAddressDarwin(Op, DAG); 7027 } 7028 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 7029 case ISD::SELECT: return LowerSELECT(Op, DAG); 7030 
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 7031 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 7032 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 7033 case ISD::VASTART: return LowerVASTART(Op, DAG); 7034 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 7035 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 7036 case ISD::SINT_TO_FP: 7037 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 7038 case ISD::FP_TO_SINT: 7039 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 7040 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 7041 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 7042 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 7043 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 7044 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 7045 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); 7046 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 7047 Subtarget); 7048 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 7049 case ISD::SHL: 7050 case ISD::SRL: 7051 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 7052 case ISD::SREM: return LowerREM(Op.getNode(), DAG); 7053 case ISD::UREM: return LowerREM(Op.getNode(), DAG); 7054 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 7055 case ISD::SRL_PARTS: 7056 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 7057 case ISD::CTTZ: 7058 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 7059 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 7060 case ISD::SETCC: return LowerVSETCC(Op, DAG); 7061 case ISD::SETCCE: return LowerSETCCE(Op, DAG); 7062 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 7063 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 7064 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 7065 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 7066 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 7067 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 7068 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 7069 case ISD::MUL: return LowerMUL(Op, DAG); 7070 case ISD::SDIV: 7071 if (Subtarget->isTargetWindows()) 7072 return LowerDIV_Windows(Op, DAG, /* Signed */ true); 7073 return LowerSDIV(Op, DAG); 7074 case ISD::UDIV: 7075 if (Subtarget->isTargetWindows()) 7076 return LowerDIV_Windows(Op, DAG, /* Signed */ false); 7077 return LowerUDIV(Op, DAG); 7078 case ISD::ADDC: 7079 case ISD::ADDE: 7080 case ISD::SUBC: 7081 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 7082 case ISD::SADDO: 7083 case ISD::UADDO: 7084 case ISD::SSUBO: 7085 case ISD::USUBO: 7086 return LowerXALUO(Op, DAG); 7087 case ISD::ATOMIC_LOAD: 7088 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 7089 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 7090 case ISD::SDIVREM: 7091 case ISD::UDIVREM: return LowerDivRem(Op, DAG); 7092 case ISD::DYNAMIC_STACKALLOC: 7093 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment()) 7094 return LowerDYNAMIC_STACKALLOC(Op, DAG); 7095 llvm_unreachable("Don't know how to custom lower this!"); 7096 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 7097 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 7098 case ARMISD::WIN__DBZCHK: return SDValue(); 7099 } 7100 } 7101 7102 /// ReplaceNodeResults - Replace the results of node with an illegal result 7103 /// type with new values built 
out of custom code. 7104 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 7105 SmallVectorImpl<SDValue> &Results, 7106 SelectionDAG &DAG) const { 7107 SDValue Res; 7108 switch (N->getOpcode()) { 7109 default: 7110 llvm_unreachable("Don't know how to custom expand this!"); 7111 case ISD::READ_REGISTER: 7112 ExpandREAD_REGISTER(N, Results, DAG); 7113 break; 7114 case ISD::BITCAST: 7115 Res = ExpandBITCAST(N, DAG); 7116 break; 7117 case ISD::SRL: 7118 case ISD::SRA: 7119 Res = Expand64BitShift(N, DAG, Subtarget); 7120 break; 7121 case ISD::SREM: 7122 case ISD::UREM: 7123 Res = LowerREM(N, DAG); 7124 break; 7125 case ISD::SDIVREM: 7126 case ISD::UDIVREM: 7127 Res = LowerDivRem(SDValue(N, 0), DAG); 7128 assert(Res.getNumOperands() == 2 && "DivRem needs two values"); 7129 Results.push_back(Res.getValue(0)); 7130 Results.push_back(Res.getValue(1)); 7131 return; 7132 case ISD::READCYCLECOUNTER: 7133 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 7134 return; 7135 case ISD::UDIV: 7136 case ISD::SDIV: 7137 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); 7138 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, 7139 Results); 7140 case ISD::ATOMIC_CMP_SWAP: 7141 ReplaceCMP_SWAP_64Results(N, Results, DAG); 7142 return; 7143 } 7144 if (Res.getNode()) 7145 Results.push_back(Res); 7146 } 7147 7148 //===----------------------------------------------------------------------===// 7149 // ARM Scheduler Hooks 7150 //===----------------------------------------------------------------------===// 7151 7152 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 7153 /// registers the function context. 7154 void ARMTargetLowering:: 7155 SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 7156 MachineBasicBlock *DispatchBB, int FI) const { 7157 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 7158 DebugLoc dl = MI->getDebugLoc(); 7159 MachineFunction *MF = MBB->getParent(); 7160 MachineRegisterInfo *MRI = &MF->getRegInfo(); 7161 MachineConstantPool *MCP = MF->getConstantPool(); 7162 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 7163 const Function *F = MF->getFunction(); 7164 7165 bool isThumb = Subtarget->isThumb(); 7166 bool isThumb2 = Subtarget->isThumb2(); 7167 7168 unsigned PCLabelId = AFI->createPICLabelUId(); 7169 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 7170 ARMConstantPoolValue *CPV = 7171 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 7172 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 7173 7174 const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass 7175 : &ARM::GPRRegClass; 7176 7177 // Grab constant pool and fixed stack memory operands. 7178 MachineMemOperand *CPMMO = 7179 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), 7180 MachineMemOperand::MOLoad, 4, 4); 7181 7182 MachineMemOperand *FIMMOSt = 7183 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 7184 MachineMemOperand::MOStore, 4, 4); 7185 7186 // Load the address of the dispatch MBB into the jump buffer. 7187 if (isThumb2) { 7188 // Incoming value: jbuf 7189 // ldr.n r5, LCPI1_1 7190 // orr r5, r5, #1 7191 // add r5, pc 7192 // str r5, [$jbuf, #+4] ; &jbuf[1] 7193 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7194 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 7195 .addConstantPoolIndex(CPI) 7196 .addMemOperand(CPMMO)); 7197 // Set the low bit because of thumb mode. 
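// (Thumb code addresses have bit 0 set for interworking, which is what the
// t2ORRri of #0x01 below provides before the address is stored into the
// jump buffer.)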
7198 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7199 AddDefaultCC( 7200 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 7201 .addReg(NewVReg1, RegState::Kill) 7202 .addImm(0x01))); 7203 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7204 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 7205 .addReg(NewVReg2, RegState::Kill) 7206 .addImm(PCLabelId); 7207 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 7208 .addReg(NewVReg3, RegState::Kill) 7209 .addFrameIndex(FI) 7210 .addImm(36) // &jbuf[1] :: pc 7211 .addMemOperand(FIMMOSt)); 7212 } else if (isThumb) { 7213 // Incoming value: jbuf 7214 // ldr.n r1, LCPI1_4 7215 // add r1, pc 7216 // mov r2, #1 7217 // orrs r1, r2 7218 // add r2, $jbuf, #+4 ; &jbuf[1] 7219 // str r1, [r2] 7220 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7221 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 7222 .addConstantPoolIndex(CPI) 7223 .addMemOperand(CPMMO)); 7224 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7225 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 7226 .addReg(NewVReg1, RegState::Kill) 7227 .addImm(PCLabelId); 7228 // Set the low bit because of thumb mode. 7229 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7230 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 7231 .addReg(ARM::CPSR, RegState::Define) 7232 .addImm(1)); 7233 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7234 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 7235 .addReg(ARM::CPSR, RegState::Define) 7236 .addReg(NewVReg2, RegState::Kill) 7237 .addReg(NewVReg3, RegState::Kill)); 7238 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7239 BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) 7240 .addFrameIndex(FI) 7241 .addImm(36); // &jbuf[1] :: pc 7242 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 7243 .addReg(NewVReg4, RegState::Kill) 7244 .addReg(NewVReg5, RegState::Kill) 7245 .addImm(0) 7246 .addMemOperand(FIMMOSt)); 7247 } else { 7248 // Incoming value: jbuf 7249 // ldr r1, LCPI1_1 7250 // add r1, pc, r1 7251 // str r1, [$jbuf, #+4] ; &jbuf[1] 7252 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7253 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 7254 .addConstantPoolIndex(CPI) 7255 .addImm(0) 7256 .addMemOperand(CPMMO)); 7257 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7258 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 7259 .addReg(NewVReg1, RegState::Kill) 7260 .addImm(PCLabelId)); 7261 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 7262 .addReg(NewVReg2, RegState::Kill) 7263 .addFrameIndex(FI) 7264 .addImm(36) // &jbuf[1] :: pc 7265 .addMemOperand(FIMMOSt)); 7266 } 7267 } 7268 7269 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI, 7270 MachineBasicBlock *MBB) const { 7271 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 7272 DebugLoc dl = MI->getDebugLoc(); 7273 MachineFunction *MF = MBB->getParent(); 7274 MachineRegisterInfo *MRI = &MF->getRegInfo(); 7275 MachineFrameInfo *MFI = MF->getFrameInfo(); 7276 int FI = MFI->getFunctionContextIndex(); 7277 7278 const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass 7279 : &ARM::GPRnopcRegClass; 7280 7281 // Get a mapping of the call site numbers to all of the landing pads they're 7282 // associated with. 
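  // As a purely illustrative sketch of the map built below: two invokes that
  // unwind to a shared landing pad plus a third with its own cleanup pad
  // would yield something like
  //   call site 1 -> { %lpad.shared }
  //   call site 2 -> { %lpad.shared }
  //   call site 3 -> { %lpad.cleanup }
  // (the block names are invented for the example).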
7283 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 7284 unsigned MaxCSNum = 0; 7285 MachineModuleInfo &MMI = MF->getMMI(); 7286 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 7287 ++BB) { 7288 if (!BB->isEHPad()) continue; 7289 7290 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 7291 // pad. 7292 for (MachineBasicBlock::iterator 7293 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 7294 if (!II->isEHLabel()) continue; 7295 7296 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 7297 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 7298 7299 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 7300 for (SmallVectorImpl<unsigned>::iterator 7301 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 7302 CSI != CSE; ++CSI) { 7303 CallSiteNumToLPad[*CSI].push_back(&*BB); 7304 MaxCSNum = std::max(MaxCSNum, *CSI); 7305 } 7306 break; 7307 } 7308 } 7309 7310 // Get an ordered list of the machine basic blocks for the jump table. 7311 std::vector<MachineBasicBlock*> LPadList; 7312 SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; 7313 LPadList.reserve(CallSiteNumToLPad.size()); 7314 for (unsigned I = 1; I <= MaxCSNum; ++I) { 7315 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 7316 for (SmallVectorImpl<MachineBasicBlock*>::iterator 7317 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 7318 LPadList.push_back(*II); 7319 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 7320 } 7321 } 7322 7323 assert(!LPadList.empty() && 7324 "No landing pad destinations for the dispatch jump table!"); 7325 7326 // Create the jump table and associated information. 7327 MachineJumpTableInfo *JTI = 7328 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 7329 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 7330 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 7331 7332 // Create the MBBs for the dispatch code. 7333 7334 // Shove the dispatch's address into the return slot in the function context. 7335 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 7336 DispatchBB->setIsEHPad(); 7337 7338 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 7339 unsigned trap_opcode; 7340 if (Subtarget->isThumb()) 7341 trap_opcode = ARM::tTRAP; 7342 else 7343 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; 7344 7345 BuildMI(TrapBB, dl, TII->get(trap_opcode)); 7346 DispatchBB->addSuccessor(TrapBB); 7347 7348 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 7349 DispatchBB->addSuccessor(DispContBB); 7350 7351 // Insert and MBBs. 7352 MF->insert(MF->end(), DispatchBB); 7353 MF->insert(MF->end(), DispContBB); 7354 MF->insert(MF->end(), TrapBB); 7355 7356 // Insert code into the entry block that creates and registers the function 7357 // context. 7358 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 7359 7360 MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( 7361 MachinePointerInfo::getFixedStack(*MF, FI), 7362 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); 7363 7364 MachineInstrBuilder MIB; 7365 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 7366 7367 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 7368 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 7369 7370 // Add a register mask with no preserved registers. This results in all 7371 // registers being marked as clobbered. 
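  // (Informally: control reaches the dispatch block via the SjLj longjmp path
  // rather than a normal call return, so no register contents can be relied
  // upon here; clobbering everything forces any value the landing pads need
  // into the stack-based function context instead.)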
7372 MIB.addRegMask(RI.getNoPreservedMask()); 7373 7374 unsigned NumLPads = LPadList.size(); 7375 if (Subtarget->isThumb2()) { 7376 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7377 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 7378 .addFrameIndex(FI) 7379 .addImm(4) 7380 .addMemOperand(FIMMOLd)); 7381 7382 if (NumLPads < 256) { 7383 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 7384 .addReg(NewVReg1) 7385 .addImm(LPadList.size())); 7386 } else { 7387 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7388 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 7389 .addImm(NumLPads & 0xFFFF)); 7390 7391 unsigned VReg2 = VReg1; 7392 if ((NumLPads & 0xFFFF0000) != 0) { 7393 VReg2 = MRI->createVirtualRegister(TRC); 7394 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 7395 .addReg(VReg1) 7396 .addImm(NumLPads >> 16)); 7397 } 7398 7399 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 7400 .addReg(NewVReg1) 7401 .addReg(VReg2)); 7402 } 7403 7404 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 7405 .addMBB(TrapBB) 7406 .addImm(ARMCC::HI) 7407 .addReg(ARM::CPSR); 7408 7409 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7410 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 7411 .addJumpTableIndex(MJTI)); 7412 7413 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7414 AddDefaultCC( 7415 AddDefaultPred( 7416 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 7417 .addReg(NewVReg3, RegState::Kill) 7418 .addReg(NewVReg1) 7419 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 7420 7421 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 7422 .addReg(NewVReg4, RegState::Kill) 7423 .addReg(NewVReg1) 7424 .addJumpTableIndex(MJTI); 7425 } else if (Subtarget->isThumb()) { 7426 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7427 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 7428 .addFrameIndex(FI) 7429 .addImm(1) 7430 .addMemOperand(FIMMOLd)); 7431 7432 if (NumLPads < 256) { 7433 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 7434 .addReg(NewVReg1) 7435 .addImm(NumLPads)); 7436 } else { 7437 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7438 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7439 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7440 7441 // MachineConstantPool wants an explicit alignment. 
7442 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 7443 if (Align == 0) 7444 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 7445 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7446 7447 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7448 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 7449 .addReg(VReg1, RegState::Define) 7450 .addConstantPoolIndex(Idx)); 7451 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 7452 .addReg(NewVReg1) 7453 .addReg(VReg1)); 7454 } 7455 7456 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 7457 .addMBB(TrapBB) 7458 .addImm(ARMCC::HI) 7459 .addReg(ARM::CPSR); 7460 7461 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7462 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 7463 .addReg(ARM::CPSR, RegState::Define) 7464 .addReg(NewVReg1) 7465 .addImm(2)); 7466 7467 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7468 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 7469 .addJumpTableIndex(MJTI)); 7470 7471 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7472 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 7473 .addReg(ARM::CPSR, RegState::Define) 7474 .addReg(NewVReg2, RegState::Kill) 7475 .addReg(NewVReg3)); 7476 7477 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( 7478 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); 7479 7480 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7481 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 7482 .addReg(NewVReg4, RegState::Kill) 7483 .addImm(0) 7484 .addMemOperand(JTMMOLd)); 7485 7486 unsigned NewVReg6 = NewVReg5; 7487 if (RelocM == Reloc::PIC_) { 7488 NewVReg6 = MRI->createVirtualRegister(TRC); 7489 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 7490 .addReg(ARM::CPSR, RegState::Define) 7491 .addReg(NewVReg5, RegState::Kill) 7492 .addReg(NewVReg3)); 7493 } 7494 7495 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 7496 .addReg(NewVReg6, RegState::Kill) 7497 .addJumpTableIndex(MJTI); 7498 } else { 7499 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7500 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 7501 .addFrameIndex(FI) 7502 .addImm(4) 7503 .addMemOperand(FIMMOLd)); 7504 7505 if (NumLPads < 256) { 7506 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 7507 .addReg(NewVReg1) 7508 .addImm(NumLPads)); 7509 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 7510 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7511 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 7512 .addImm(NumLPads & 0xFFFF)); 7513 7514 unsigned VReg2 = VReg1; 7515 if ((NumLPads & 0xFFFF0000) != 0) { 7516 VReg2 = MRI->createVirtualRegister(TRC); 7517 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 7518 .addReg(VReg1) 7519 .addImm(NumLPads >> 16)); 7520 } 7521 7522 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 7523 .addReg(NewVReg1) 7524 .addReg(VReg2)); 7525 } else { 7526 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7527 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7528 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7529 7530 // MachineConstantPool wants an explicit alignment. 
7531 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 7532 if (Align == 0) 7533 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 7534 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7535 7536 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7537 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 7538 .addReg(VReg1, RegState::Define) 7539 .addConstantPoolIndex(Idx) 7540 .addImm(0)); 7541 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 7542 .addReg(NewVReg1) 7543 .addReg(VReg1, RegState::Kill)); 7544 } 7545 7546 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 7547 .addMBB(TrapBB) 7548 .addImm(ARMCC::HI) 7549 .addReg(ARM::CPSR); 7550 7551 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7552 AddDefaultCC( 7553 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 7554 .addReg(NewVReg1) 7555 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 7556 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7557 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 7558 .addJumpTableIndex(MJTI)); 7559 7560 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( 7561 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); 7562 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7563 AddDefaultPred( 7564 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 7565 .addReg(NewVReg3, RegState::Kill) 7566 .addReg(NewVReg4) 7567 .addImm(0) 7568 .addMemOperand(JTMMOLd)); 7569 7570 if (RelocM == Reloc::PIC_) { 7571 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 7572 .addReg(NewVReg5, RegState::Kill) 7573 .addReg(NewVReg4) 7574 .addJumpTableIndex(MJTI); 7575 } else { 7576 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 7577 .addReg(NewVReg5, RegState::Kill) 7578 .addJumpTableIndex(MJTI); 7579 } 7580 } 7581 7582 // Add the jump table entries as successors to the MBB. 7583 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 7584 for (std::vector<MachineBasicBlock*>::iterator 7585 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 7586 MachineBasicBlock *CurMBB = *I; 7587 if (SeenMBBs.insert(CurMBB).second) 7588 DispContBB->addSuccessor(CurMBB); 7589 } 7590 7591 // N.B. the order the invoke BBs are processed in doesn't matter here. 7592 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); 7593 SmallVector<MachineBasicBlock*, 64> MBBLPads; 7594 for (MachineBasicBlock *BB : InvokeBBs) { 7595 7596 // Remove the landing pad successor from the invoke block and replace it 7597 // with the new dispatch block. 7598 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 7599 BB->succ_end()); 7600 while (!Successors.empty()) { 7601 MachineBasicBlock *SMBB = Successors.pop_back_val(); 7602 if (SMBB->isEHPad()) { 7603 BB->removeSuccessor(SMBB); 7604 MBBLPads.push_back(SMBB); 7605 } 7606 } 7607 7608 BB->addSuccessor(DispatchBB, BranchProbability::getZero()); 7609 BB->normalizeSuccProbs(); 7610 7611 // Find the invoke call and mark all of the callee-saved registers as 7612 // 'implicit defined' so that they're spilled. This prevents code from 7613 // moving instructions to before the EH block, where they will never be 7614 // executed. 
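    // (Informally: giving every callee-saved register that the call does not
    // already define an <imp-def,dead> operand makes later passes treat the
    // invoke as clobbering it, so it is spilled around the call and nothing
    // that depends on it gets scheduled above the call.)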
7615 for (MachineBasicBlock::reverse_iterator 7616 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 7617 if (!II->isCall()) continue; 7618 7619 DenseMap<unsigned, bool> DefRegs; 7620 for (MachineInstr::mop_iterator 7621 OI = II->operands_begin(), OE = II->operands_end(); 7622 OI != OE; ++OI) { 7623 if (!OI->isReg()) continue; 7624 DefRegs[OI->getReg()] = true; 7625 } 7626 7627 MachineInstrBuilder MIB(*MF, &*II); 7628 7629 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 7630 unsigned Reg = SavedRegs[i]; 7631 if (Subtarget->isThumb2() && 7632 !ARM::tGPRRegClass.contains(Reg) && 7633 !ARM::hGPRRegClass.contains(Reg)) 7634 continue; 7635 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 7636 continue; 7637 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 7638 continue; 7639 if (!DefRegs[Reg]) 7640 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 7641 } 7642 7643 break; 7644 } 7645 } 7646 7647 // Mark all former landing pads as non-landing pads. The dispatch is the only 7648 // landing pad now. 7649 for (SmallVectorImpl<MachineBasicBlock*>::iterator 7650 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 7651 (*I)->setIsEHPad(false); 7652 7653 // The instruction is gone now. 7654 MI->eraseFromParent(); 7655 } 7656 7657 static 7658 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 7659 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 7660 E = MBB->succ_end(); I != E; ++I) 7661 if (*I != Succ) 7662 return *I; 7663 llvm_unreachable("Expecting a BB with two successors!"); 7664 } 7665 7666 /// Return the load opcode for a given load size. If load size >= 8, 7667 /// neon opcode will be returned. 7668 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { 7669 if (LdSize >= 8) 7670 return LdSize == 16 ? ARM::VLD1q32wb_fixed 7671 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; 7672 if (IsThumb1) 7673 return LdSize == 4 ? ARM::tLDRi 7674 : LdSize == 2 ? ARM::tLDRHi 7675 : LdSize == 1 ? ARM::tLDRBi : 0; 7676 if (IsThumb2) 7677 return LdSize == 4 ? ARM::t2LDR_POST 7678 : LdSize == 2 ? ARM::t2LDRH_POST 7679 : LdSize == 1 ? ARM::t2LDRB_POST : 0; 7680 return LdSize == 4 ? ARM::LDR_POST_IMM 7681 : LdSize == 2 ? ARM::LDRH_POST 7682 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; 7683 } 7684 7685 /// Return the store opcode for a given store size. If store size >= 8, 7686 /// neon opcode will be returned. 7687 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { 7688 if (StSize >= 8) 7689 return StSize == 16 ? ARM::VST1q32wb_fixed 7690 : StSize == 8 ? ARM::VST1d32wb_fixed : 0; 7691 if (IsThumb1) 7692 return StSize == 4 ? ARM::tSTRi 7693 : StSize == 2 ? ARM::tSTRHi 7694 : StSize == 1 ? ARM::tSTRBi : 0; 7695 if (IsThumb2) 7696 return StSize == 4 ? ARM::t2STR_POST 7697 : StSize == 2 ? ARM::t2STRH_POST 7698 : StSize == 1 ? ARM::t2STRB_POST : 0; 7699 return StSize == 4 ? ARM::STR_POST_IMM 7700 : StSize == 2 ? ARM::STRH_POST 7701 : StSize == 1 ? ARM::STRB_POST_IMM : 0; 7702 } 7703 7704 /// Emit a post-increment load operation with given size. The instructions 7705 /// will be added to BB at Pos. 
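/// As an illustrative sketch (not an exhaustive description): LdSize == 4 in
/// ARM mode becomes a single post-indexed load along the lines of
///   ldr Data, [AddrIn], #4
/// leaving the incremented address in AddrOut, whereas Thumb1 has no
/// post-indexed forms, so a plain load is followed by a separate tADDi8 of
/// the access size.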
7706 static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
7707                        const TargetInstrInfo *TII, DebugLoc dl,
7708                        unsigned LdSize, unsigned Data, unsigned AddrIn,
7709                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
7710   unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
7711   assert(LdOpc != 0 && "Should have a load opcode");
7712   if (LdSize >= 8) {
7713     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
7714                        .addReg(AddrOut, RegState::Define).addReg(AddrIn)
7715                        .addImm(0));
7716   } else if (IsThumb1) {
7717     // load + update AddrIn
7718     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
7719                        .addReg(AddrIn).addImm(0));
7720     MachineInstrBuilder MIB =
7721         BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
7722     MIB = AddDefaultT1CC(MIB);
7723     MIB.addReg(AddrIn).addImm(LdSize);
7724     AddDefaultPred(MIB);
7725   } else if (IsThumb2) {
7726     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
7727                        .addReg(AddrOut, RegState::Define).addReg(AddrIn)
7728                        .addImm(LdSize));
7729   } else { // arm
7730     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
7731                        .addReg(AddrOut, RegState::Define).addReg(AddrIn)
7732                        .addReg(0).addImm(LdSize));
7733   }
7734 }
7735
7736 /// Emit a post-increment store operation with given size. The instructions
7737 /// will be added to BB at Pos.
7738 static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
7739                        const TargetInstrInfo *TII, DebugLoc dl,
7740                        unsigned StSize, unsigned Data, unsigned AddrIn,
7741                        unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
7742   unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
7743   assert(StOpc != 0 && "Should have a store opcode");
7744   if (StSize >= 8) {
7745     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
7746                        .addReg(AddrIn).addImm(0).addReg(Data));
7747   } else if (IsThumb1) {
7748     // store + update AddrIn
7749     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
7750                        .addReg(AddrIn).addImm(0));
7751     MachineInstrBuilder MIB =
7752         BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
7753     MIB = AddDefaultT1CC(MIB);
7754     MIB.addReg(AddrIn).addImm(StSize);
7755     AddDefaultPred(MIB);
7756   } else if (IsThumb2) {
7757     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
7758                        .addReg(Data).addReg(AddrIn).addImm(StSize));
7759   } else { // arm
7760     AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
7761                        .addReg(Data).addReg(AddrIn).addReg(0)
7762                        .addImm(StSize));
7763   }
7764 }
7765
7766 MachineBasicBlock *
7767 ARMTargetLowering::EmitStructByval(MachineInstr *MI,
7768                                    MachineBasicBlock *BB) const {
7769   // This pseudo instruction has 4 operands: dst, src, size, alignment.
7770   // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
7771   // Otherwise, we will generate unrolled scalar copies.
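  // As a rough worked example (the numbers are illustrative only): SizeVal ==
  // 11 with 4-byte alignment and no profitable NEON unit gives UnitSize == 4,
  // LoopSize == 8 and BytesLeft == 3, i.e. two word-sized copies followed by
  // three byte copies, emitted inline as long as SizeVal does not exceed the
  // subtarget's inline threshold.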
7772 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 7773 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7774 MachineFunction::iterator It = ++BB->getIterator(); 7775 7776 unsigned dest = MI->getOperand(0).getReg(); 7777 unsigned src = MI->getOperand(1).getReg(); 7778 unsigned SizeVal = MI->getOperand(2).getImm(); 7779 unsigned Align = MI->getOperand(3).getImm(); 7780 DebugLoc dl = MI->getDebugLoc(); 7781 7782 MachineFunction *MF = BB->getParent(); 7783 MachineRegisterInfo &MRI = MF->getRegInfo(); 7784 unsigned UnitSize = 0; 7785 const TargetRegisterClass *TRC = nullptr; 7786 const TargetRegisterClass *VecTRC = nullptr; 7787 7788 bool IsThumb1 = Subtarget->isThumb1Only(); 7789 bool IsThumb2 = Subtarget->isThumb2(); 7790 7791 if (Align & 1) { 7792 UnitSize = 1; 7793 } else if (Align & 2) { 7794 UnitSize = 2; 7795 } else { 7796 // Check whether we can use NEON instructions. 7797 if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) && 7798 Subtarget->hasNEON()) { 7799 if ((Align % 16 == 0) && SizeVal >= 16) 7800 UnitSize = 16; 7801 else if ((Align % 8 == 0) && SizeVal >= 8) 7802 UnitSize = 8; 7803 } 7804 // Can't use NEON instructions. 7805 if (UnitSize == 0) 7806 UnitSize = 4; 7807 } 7808 7809 // Select the correct opcode and register class for unit size load/store 7810 bool IsNeon = UnitSize >= 8; 7811 TRC = (IsThumb1 || IsThumb2) ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 7812 if (IsNeon) 7813 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass 7814 : UnitSize == 8 ? &ARM::DPRRegClass 7815 : nullptr; 7816 7817 unsigned BytesLeft = SizeVal % UnitSize; 7818 unsigned LoopSize = SizeVal - BytesLeft; 7819 7820 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 7821 // Use LDR and STR to copy. 7822 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 7823 // [destOut] = STR_POST(scratch, destIn, UnitSize) 7824 unsigned srcIn = src; 7825 unsigned destIn = dest; 7826 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 7827 unsigned srcOut = MRI.createVirtualRegister(TRC); 7828 unsigned destOut = MRI.createVirtualRegister(TRC); 7829 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); 7830 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, 7831 IsThumb1, IsThumb2); 7832 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, 7833 IsThumb1, IsThumb2); 7834 srcIn = srcOut; 7835 destIn = destOut; 7836 } 7837 7838 // Handle the leftover bytes with LDRB and STRB. 7839 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 7840 // [destOut] = STRB_POST(scratch, destIn, 1) 7841 for (unsigned i = 0; i < BytesLeft; i++) { 7842 unsigned srcOut = MRI.createVirtualRegister(TRC); 7843 unsigned destOut = MRI.createVirtualRegister(TRC); 7844 unsigned scratch = MRI.createVirtualRegister(TRC); 7845 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, 7846 IsThumb1, IsThumb2); 7847 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, 7848 IsThumb1, IsThumb2); 7849 srcIn = srcOut; 7850 destIn = destOut; 7851 } 7852 MI->eraseFromParent(); // The instruction is gone now. 7853 return BB; 7854 } 7855 7856 // Expand the pseudo op to a loop. 7857 // thisMBB: 7858 // ... 
7859 // movw varEnd, # --> with thumb2 7860 // movt varEnd, # 7861 // ldrcp varEnd, idx --> without thumb2 7862 // fallthrough --> loopMBB 7863 // loopMBB: 7864 // PHI varPhi, varEnd, varLoop 7865 // PHI srcPhi, src, srcLoop 7866 // PHI destPhi, dst, destLoop 7867 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 7868 // [destLoop] = STR_POST(scratch, destPhi, UnitSize) 7869 // subs varLoop, varPhi, #UnitSize 7870 // bne loopMBB 7871 // fallthrough --> exitMBB 7872 // exitMBB: 7873 // epilogue to handle left-over bytes 7874 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 7875 // [destOut] = STRB_POST(scratch, destLoop, 1) 7876 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 7877 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 7878 MF->insert(It, loopMBB); 7879 MF->insert(It, exitMBB); 7880 7881 // Transfer the remainder of BB and its successor edges to exitMBB. 7882 exitMBB->splice(exitMBB->begin(), BB, 7883 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7884 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 7885 7886 // Load an immediate to varEnd. 7887 unsigned varEnd = MRI.createVirtualRegister(TRC); 7888 if (Subtarget->useMovt(*MF)) { 7889 unsigned Vtmp = varEnd; 7890 if ((LoopSize & 0xFFFF0000) != 0) 7891 Vtmp = MRI.createVirtualRegister(TRC); 7892 AddDefaultPred(BuildMI(BB, dl, 7893 TII->get(IsThumb2 ? ARM::t2MOVi16 : ARM::MOVi16), 7894 Vtmp).addImm(LoopSize & 0xFFFF)); 7895 7896 if ((LoopSize & 0xFFFF0000) != 0) 7897 AddDefaultPred(BuildMI(BB, dl, 7898 TII->get(IsThumb2 ? ARM::t2MOVTi16 : ARM::MOVTi16), 7899 varEnd) 7900 .addReg(Vtmp) 7901 .addImm(LoopSize >> 16)); 7902 } else { 7903 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7904 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7905 const Constant *C = ConstantInt::get(Int32Ty, LoopSize); 7906 7907 // MachineConstantPool wants an explicit alignment. 
7908 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 7909 if (Align == 0) 7910 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 7911 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7912 7913 if (IsThumb1) 7914 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg( 7915 varEnd, RegState::Define).addConstantPoolIndex(Idx)); 7916 else 7917 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg( 7918 varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0)); 7919 } 7920 BB->addSuccessor(loopMBB); 7921 7922 // Generate the loop body: 7923 // varPhi = PHI(varLoop, varEnd) 7924 // srcPhi = PHI(srcLoop, src) 7925 // destPhi = PHI(destLoop, dst) 7926 MachineBasicBlock *entryBB = BB; 7927 BB = loopMBB; 7928 unsigned varLoop = MRI.createVirtualRegister(TRC); 7929 unsigned varPhi = MRI.createVirtualRegister(TRC); 7930 unsigned srcLoop = MRI.createVirtualRegister(TRC); 7931 unsigned srcPhi = MRI.createVirtualRegister(TRC); 7932 unsigned destLoop = MRI.createVirtualRegister(TRC); 7933 unsigned destPhi = MRI.createVirtualRegister(TRC); 7934 7935 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) 7936 .addReg(varLoop).addMBB(loopMBB) 7937 .addReg(varEnd).addMBB(entryBB); 7938 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) 7939 .addReg(srcLoop).addMBB(loopMBB) 7940 .addReg(src).addMBB(entryBB); 7941 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) 7942 .addReg(destLoop).addMBB(loopMBB) 7943 .addReg(dest).addMBB(entryBB); 7944 7945 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 7946 // [destLoop] = STR_POST(scratch, destPhi, UnitSiz) 7947 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); 7948 emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop, 7949 IsThumb1, IsThumb2); 7950 emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop, 7951 IsThumb1, IsThumb2); 7952 7953 // Decrement loop variable by UnitSize. 7954 if (IsThumb1) { 7955 MachineInstrBuilder MIB = 7956 BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop); 7957 MIB = AddDefaultT1CC(MIB); 7958 MIB.addReg(varPhi).addImm(UnitSize); 7959 AddDefaultPred(MIB); 7960 } else { 7961 MachineInstrBuilder MIB = 7962 BuildMI(*BB, BB->end(), dl, 7963 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); 7964 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize))); 7965 MIB->getOperand(5).setReg(ARM::CPSR); 7966 MIB->getOperand(5).setIsDef(true); 7967 } 7968 BuildMI(*BB, BB->end(), dl, 7969 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc)) 7970 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 7971 7972 // loopMBB can loop back to loopMBB or fall through to exitMBB. 7973 BB->addSuccessor(loopMBB); 7974 BB->addSuccessor(exitMBB); 7975 7976 // Add epilogue to handle BytesLeft. 
7977   BB = exitMBB;
7978   MachineInstr *StartOfExit = exitMBB->begin();
7979
7980   //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
7981   //   [destOut] = STRB_POST(scratch, destLoop, 1)
7982   unsigned srcIn = srcLoop;
7983   unsigned destIn = destLoop;
7984   for (unsigned i = 0; i < BytesLeft; i++) {
7985     unsigned srcOut = MRI.createVirtualRegister(TRC);
7986     unsigned destOut = MRI.createVirtualRegister(TRC);
7987     unsigned scratch = MRI.createVirtualRegister(TRC);
7988     emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
7989                IsThumb1, IsThumb2);
7990     emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
7991                IsThumb1, IsThumb2);
7992     srcIn = srcOut;
7993     destIn = destOut;
7994   }
7995
7996   MI->eraseFromParent();   // The instruction is gone now.
7997   return BB;
7998 }
7999
8000 MachineBasicBlock *
8001 ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
8002                                        MachineBasicBlock *MBB) const {
8003   const TargetMachine &TM = getTargetMachine();
8004   const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
8005   DebugLoc DL = MI->getDebugLoc();
8006
8007   assert(Subtarget->isTargetWindows() &&
8008          "__chkstk is only supported on Windows");
8009   assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
8010
8011   // __chkstk takes the number of words to allocate on the stack in R4, and
8012   // returns the stack adjustment in number of bytes in R4.  This will not
8013   // clobber any other registers (other than the obvious lr).
8014   //
8015   // Although, technically, IP should be considered a register which may be
8016   // clobbered, the call itself will not touch it.  Windows on ARM is a pure
8017   // Thumb-2 environment, so there is no interworking required.  As a result, we
8018   // do not expect a veneer to be emitted by the linker, clobbering IP.
8019   //
8020   // Each module receives its own copy of __chkstk, so no import thunk is
8021   // required, again, ensuring that IP is not clobbered.
8022   //
8023   // Finally, although some linkers may theoretically provide a trampoline for
8024   // out-of-range calls (which is quite common due to a 32M range limitation of
8025   // branches for Thumb), we can generate the long-call version via
8026   // -mcmodel=large, alleviating the need for the trampoline which may clobber
8027   // IP.
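  // The sequence emitted below therefore amounts to, roughly (the mnemonics
  // are illustrative; the large code model variant is shown in brackets):
  //   [ movw/movt rN, __chkstk ]
  //   bl __chkstk              (or blx rN for the large code model)
  //   sub.w sp, sp, r4
  // with R4 holding the allocation size in words on entry and the adjustment
  // in bytes on return, as described above.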
8028 8029 switch (TM.getCodeModel()) { 8030 case CodeModel::Small: 8031 case CodeModel::Medium: 8032 case CodeModel::Default: 8033 case CodeModel::Kernel: 8034 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) 8035 .addImm((unsigned)ARMCC::AL).addReg(0) 8036 .addExternalSymbol("__chkstk") 8037 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 8038 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 8039 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead); 8040 break; 8041 case CodeModel::Large: 8042 case CodeModel::JITDefault: { 8043 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 8044 unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 8045 8046 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) 8047 .addExternalSymbol("__chkstk"); 8048 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) 8049 .addImm((unsigned)ARMCC::AL).addReg(0) 8050 .addReg(Reg, RegState::Kill) 8051 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 8052 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 8053 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead); 8054 break; 8055 } 8056 } 8057 8058 AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), 8059 ARM::SP) 8060 .addReg(ARM::SP, RegState::Kill) 8061 .addReg(ARM::R4, RegState::Kill) 8062 .setMIFlags(MachineInstr::FrameSetup))); 8063 8064 MI->eraseFromParent(); 8065 return MBB; 8066 } 8067 8068 MachineBasicBlock * 8069 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr *MI, 8070 MachineBasicBlock *MBB) const { 8071 DebugLoc DL = MI->getDebugLoc(); 8072 MachineFunction *MF = MBB->getParent(); 8073 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 8074 8075 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); 8076 MF->insert(++MBB->getIterator(), ContBB); 8077 ContBB->splice(ContBB->begin(), MBB, 8078 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8079 ContBB->transferSuccessorsAndUpdatePHIs(MBB); 8080 8081 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 8082 MF->push_back(TrapBB); 8083 BuildMI(TrapBB, DL, TII->get(ARM::t2UDF)).addImm(249); 8084 MBB->addSuccessor(TrapBB); 8085 8086 BuildMI(*MBB, MI, DL, TII->get(ARM::tCBZ)) 8087 .addReg(MI->getOperand(0).getReg()) 8088 .addMBB(TrapBB); 8089 AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::t2B)).addMBB(ContBB)); 8090 MBB->addSuccessor(ContBB); 8091 8092 MI->eraseFromParent(); 8093 return ContBB; 8094 } 8095 8096 MachineBasicBlock * 8097 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 8098 MachineBasicBlock *BB) const { 8099 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 8100 DebugLoc dl = MI->getDebugLoc(); 8101 bool isThumb2 = Subtarget->isThumb2(); 8102 switch (MI->getOpcode()) { 8103 default: { 8104 MI->dump(); 8105 llvm_unreachable("Unexpected instr type to insert"); 8106 } 8107 // The Thumb2 pre-indexed stores have the same MI operands, they just 8108 // define them differently in the .td files from the isel patterns, so 8109 // they need pseudos. 8110 case ARM::t2STR_preidx: 8111 MI->setDesc(TII->get(ARM::t2STR_PRE)); 8112 return BB; 8113 case ARM::t2STRB_preidx: 8114 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 8115 return BB; 8116 case ARM::t2STRH_preidx: 8117 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 8118 return BB; 8119 8120 case ARM::STRi_preidx: 8121 case ARM::STRBi_preidx: { 8122 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 8123 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 8124 // Decode the offset. 
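    // (The single addrmode2 immediate operand decoded here packs the add/sub
    // direction together with the offset value; getAM2Op and getAM2Offset
    // below pull those two pieces back apart.  This is a summary of the
    // helpers' observable behaviour, not of the exact bit layout.)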
8125 unsigned Offset = MI->getOperand(4).getImm(); 8126 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 8127 Offset = ARM_AM::getAM2Offset(Offset); 8128 if (isSub) 8129 Offset = -Offset; 8130 8131 MachineMemOperand *MMO = *MI->memoperands_begin(); 8132 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 8133 .addOperand(MI->getOperand(0)) // Rn_wb 8134 .addOperand(MI->getOperand(1)) // Rt 8135 .addOperand(MI->getOperand(2)) // Rn 8136 .addImm(Offset) // offset (skip GPR==zero_reg) 8137 .addOperand(MI->getOperand(5)) // pred 8138 .addOperand(MI->getOperand(6)) 8139 .addMemOperand(MMO); 8140 MI->eraseFromParent(); 8141 return BB; 8142 } 8143 case ARM::STRr_preidx: 8144 case ARM::STRBr_preidx: 8145 case ARM::STRH_preidx: { 8146 unsigned NewOpc; 8147 switch (MI->getOpcode()) { 8148 default: llvm_unreachable("unexpected opcode!"); 8149 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 8150 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 8151 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 8152 } 8153 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 8154 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 8155 MIB.addOperand(MI->getOperand(i)); 8156 MI->eraseFromParent(); 8157 return BB; 8158 } 8159 8160 case ARM::tMOVCCr_pseudo: { 8161 // To "insert" a SELECT_CC instruction, we actually have to insert the 8162 // diamond control-flow pattern. The incoming instruction knows the 8163 // destination vreg to set, the condition code register to branch on, the 8164 // true/false values to select between, and a branch opcode to use. 8165 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8166 MachineFunction::iterator It = ++BB->getIterator(); 8167 8168 // thisMBB: 8169 // ... 8170 // TrueVal = ... 8171 // cmpTY ccX, r1, r2 8172 // bCC copy1MBB 8173 // fallthrough --> copy0MBB 8174 MachineBasicBlock *thisMBB = BB; 8175 MachineFunction *F = BB->getParent(); 8176 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8177 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8178 F->insert(It, copy0MBB); 8179 F->insert(It, sinkMBB); 8180 8181 // Transfer the remainder of BB and its successor edges to sinkMBB. 8182 sinkMBB->splice(sinkMBB->begin(), BB, 8183 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8184 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8185 8186 BB->addSuccessor(copy0MBB); 8187 BB->addSuccessor(sinkMBB); 8188 8189 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 8190 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 8191 8192 // copy0MBB: 8193 // %FalseValue = ... 8194 // # fallthrough to sinkMBB 8195 BB = copy0MBB; 8196 8197 // Update machine-CFG edges 8198 BB->addSuccessor(sinkMBB); 8199 8200 // sinkMBB: 8201 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8202 // ... 8203 BB = sinkMBB; 8204 BuildMI(*BB, BB->begin(), dl, 8205 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 8206 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 8207 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8208 8209 MI->eraseFromParent(); // The pseudo instruction is gone now. 8210 return BB; 8211 } 8212 8213 case ARM::BCCi64: 8214 case ARM::BCCZi64: { 8215 // If there is an unconditional branch to the other successor, remove it. 8216 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8217 8218 // Compare both parts that make up the double comparison separately for 8219 // equality. 
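    // Roughly, for BCCi64 with an EQ condition the expansion below looks like
    //   cmp   LHS1, RHS1
    //   cmpeq LHS2, RHS2    ; second compare predicated on the first being EQ
    //   beq   destMBB
    //   b     exitMBB
    // (the register and block names refer to the variables below; the
    // mnemonics themselves are illustrative).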
8220 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 8221 8222 unsigned LHS1 = MI->getOperand(1).getReg(); 8223 unsigned LHS2 = MI->getOperand(2).getReg(); 8224 if (RHSisZero) { 8225 AddDefaultPred(BuildMI(BB, dl, 8226 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8227 .addReg(LHS1).addImm(0)); 8228 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8229 .addReg(LHS2).addImm(0) 8230 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 8231 } else { 8232 unsigned RHS1 = MI->getOperand(3).getReg(); 8233 unsigned RHS2 = MI->getOperand(4).getReg(); 8234 AddDefaultPred(BuildMI(BB, dl, 8235 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 8236 .addReg(LHS1).addReg(RHS1)); 8237 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 8238 .addReg(LHS2).addReg(RHS2) 8239 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 8240 } 8241 8242 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 8243 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 8244 if (MI->getOperand(0).getImm() == ARMCC::NE) 8245 std::swap(destMBB, exitMBB); 8246 8247 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 8248 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 8249 if (isThumb2) 8250 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 8251 else 8252 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 8253 8254 MI->eraseFromParent(); // The pseudo instruction is gone now. 8255 return BB; 8256 } 8257 8258 case ARM::Int_eh_sjlj_setjmp: 8259 case ARM::Int_eh_sjlj_setjmp_nofp: 8260 case ARM::tInt_eh_sjlj_setjmp: 8261 case ARM::t2Int_eh_sjlj_setjmp: 8262 case ARM::t2Int_eh_sjlj_setjmp_nofp: 8263 return BB; 8264 8265 case ARM::Int_eh_sjlj_setup_dispatch: 8266 EmitSjLjDispatchBlock(MI, BB); 8267 return BB; 8268 8269 case ARM::ABS: 8270 case ARM::t2ABS: { 8271 // To insert an ABS instruction, we have to insert the 8272 // diamond control-flow pattern. The incoming instruction knows the 8273 // source vreg to test against 0, the destination vreg to set, 8274 // the condition code register to branch on, the 8275 // true/false values to select between, and a branch opcode to use. 8276 // It transforms 8277 // V1 = ABS V0 8278 // into 8279 // V2 = MOVS V0 8280 // BCC (branch to SinkBB if V0 >= 0) 8281 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 8282 // SinkBB: V1 = PHI(V2, V3) 8283 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8284 MachineFunction::iterator BBI = ++BB->getIterator(); 8285 MachineFunction *Fn = BB->getParent(); 8286 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 8287 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 8288 Fn->insert(BBI, RSBBB); 8289 Fn->insert(BBI, SinkBB); 8290 8291 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 8292 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 8293 bool ABSSrcKIll = MI->getOperand(1).isKill(); 8294 bool isThumb2 = Subtarget->isThumb2(); 8295 MachineRegisterInfo &MRI = Fn->getRegInfo(); 8296 // In Thumb mode S must not be specified if source register is the SP or 8297 // PC and if destination register is the SP, so restrict register class 8298 unsigned NewRsbDstReg = 8299 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); 8300 8301 // Transfer the remainder of BB and its successor edges to sinkMBB. 
8302 SinkBB->splice(SinkBB->begin(), BB, 8303 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8304 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 8305 8306 BB->addSuccessor(RSBBB); 8307 BB->addSuccessor(SinkBB); 8308 8309 // fall through to SinkMBB 8310 RSBBB->addSuccessor(SinkBB); 8311 8312 // insert a cmp at the end of BB 8313 AddDefaultPred(BuildMI(BB, dl, 8314 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8315 .addReg(ABSSrcReg).addImm(0)); 8316 8317 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 8318 BuildMI(BB, dl, 8319 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 8320 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 8321 8322 // insert rsbri in RSBBB 8323 // Note: BCC and rsbri will be converted into predicated rsbmi 8324 // by if-conversion pass 8325 BuildMI(*RSBBB, RSBBB->begin(), dl, 8326 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 8327 .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0) 8328 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 8329 8330 // insert PHI in SinkBB, 8331 // reuse ABSDstReg to not change uses of ABS instruction 8332 BuildMI(*SinkBB, SinkBB->begin(), dl, 8333 TII->get(ARM::PHI), ABSDstReg) 8334 .addReg(NewRsbDstReg).addMBB(RSBBB) 8335 .addReg(ABSSrcReg).addMBB(BB); 8336 8337 // remove ABS instruction 8338 MI->eraseFromParent(); 8339 8340 // return last added BB 8341 return SinkBB; 8342 } 8343 case ARM::COPY_STRUCT_BYVAL_I32: 8344 ++NumLoopByVals; 8345 return EmitStructByval(MI, BB); 8346 case ARM::WIN__CHKSTK: 8347 return EmitLowered__chkstk(MI, BB); 8348 case ARM::WIN__DBZCHK: 8349 return EmitLowered__dbzchk(MI, BB); 8350 } 8351 } 8352 8353 /// \brief Attaches vregs to MEMCPY that it will use as scratch registers 8354 /// when it is expanded into LDM/STM. This is done as a post-isel lowering 8355 /// instead of as a custom inserter because we need the use list from the SDNode. 8356 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, 8357 MachineInstr *MI, const SDNode *Node) { 8358 bool isThumb1 = Subtarget->isThumb1Only(); 8359 8360 DebugLoc DL = MI->getDebugLoc(); 8361 MachineFunction *MF = MI->getParent()->getParent(); 8362 MachineRegisterInfo &MRI = MF->getRegInfo(); 8363 MachineInstrBuilder MIB(*MF, MI); 8364 8365 // If the new dst/src is unused mark it as dead. 8366 if (!Node->hasAnyUseOfValue(0)) { 8367 MI->getOperand(0).setIsDead(true); 8368 } 8369 if (!Node->hasAnyUseOfValue(1)) { 8370 MI->getOperand(1).setIsDead(true); 8371 } 8372 8373 // The MEMCPY both defines and kills the scratch registers. 8374 for (unsigned I = 0; I != MI->getOperand(4).getImm(); ++I) { 8375 unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass 8376 : &ARM::GPRRegClass); 8377 MIB.addReg(TmpReg, RegState::Define|RegState::Dead); 8378 } 8379 } 8380 8381 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 8382 SDNode *Node) const { 8383 if (MI->getOpcode() == ARM::MEMCPY) { 8384 attachMEMCPYScratchRegs(Subtarget, MI, Node); 8385 return; 8386 } 8387 8388 const MCInstrDesc *MCID = &MI->getDesc(); 8389 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 8390 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 8391 // operand is still set to noreg. If needed, set the optional operand's 8392 // register to CPSR, and remove the redundant implicit def. 8393 // 8394 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 8395 8396 // Rename pseudo opcodes. 
8397 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 8398 if (NewOpc) { 8399 const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); 8400 MCID = &TII->get(NewOpc); 8401 8402 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 8403 "converted opcode should be the same except for cc_out"); 8404 8405 MI->setDesc(*MCID); 8406 8407 // Add the optional cc_out operand 8408 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 8409 } 8410 unsigned ccOutIdx = MCID->getNumOperands() - 1; 8411 8412 // Any ARM instruction that sets the 's' bit should specify an optional 8413 // "cc_out" operand in the last operand position. 8414 if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 8415 assert(!NewOpc && "Optional cc_out operand required"); 8416 return; 8417 } 8418 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 8419 // since we already have an optional CPSR def. 8420 bool definesCPSR = false; 8421 bool deadCPSR = false; 8422 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 8423 i != e; ++i) { 8424 const MachineOperand &MO = MI->getOperand(i); 8425 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 8426 definesCPSR = true; 8427 if (MO.isDead()) 8428 deadCPSR = true; 8429 MI->RemoveOperand(i); 8430 break; 8431 } 8432 } 8433 if (!definesCPSR) { 8434 assert(!NewOpc && "Optional cc_out operand required"); 8435 return; 8436 } 8437 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 8438 if (deadCPSR) { 8439 assert(!MI->getOperand(ccOutIdx).getReg() && 8440 "expect uninitialized optional cc_out operand"); 8441 return; 8442 } 8443 8444 // If this instruction was defined with an optional CPSR def and its dag node 8445 // had a live implicit CPSR def, then activate the optional CPSR def. 8446 MachineOperand &MO = MI->getOperand(ccOutIdx); 8447 MO.setReg(ARM::CPSR); 8448 MO.setIsDef(true); 8449 } 8450 8451 //===----------------------------------------------------------------------===// 8452 // ARM Optimization Hooks 8453 //===----------------------------------------------------------------------===// 8454 8455 // Helper function that checks if N is a null or all ones constant. 8456 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { 8457 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); 8458 } 8459 8460 // Return true if N is conditionally 0 or all ones. 8461 // Detects these expressions where cc is an i1 value: 8462 // 8463 // (select cc 0, y) [AllOnes=0] 8464 // (select cc y, 0) [AllOnes=0] 8465 // (zext cc) [AllOnes=0] 8466 // (sext cc) [AllOnes=0/1] 8467 // (select cc -1, y) [AllOnes=1] 8468 // (select cc y, -1) [AllOnes=1] 8469 // 8470 // Invert is set when N is the null/all ones constant when CC is false. 8471 // OtherOp is set to the alternative value of N. 8472 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 8473 SDValue &CC, bool &Invert, 8474 SDValue &OtherOp, 8475 SelectionDAG &DAG) { 8476 switch (N->getOpcode()) { 8477 default: return false; 8478 case ISD::SELECT: { 8479 CC = N->getOperand(0); 8480 SDValue N1 = N->getOperand(1); 8481 SDValue N2 = N->getOperand(2); 8482 if (isZeroOrAllOnes(N1, AllOnes)) { 8483 Invert = false; 8484 OtherOp = N2; 8485 return true; 8486 } 8487 if (isZeroOrAllOnes(N2, AllOnes)) { 8488 Invert = true; 8489 OtherOp = N1; 8490 return true; 8491 } 8492 return false; 8493 } 8494 case ISD::ZERO_EXTEND: 8495 // (zext cc) can never be the all ones value. 
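    // (An i1 zero-extended to a wider type is only ever 0 or 1, so when the
    // caller asks for the all-ones constant there is nothing to match and we
    // bail out immediately.)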
8496 if (AllOnes) 8497 return false; 8498 // Fall through. 8499 case ISD::SIGN_EXTEND: { 8500 SDLoc dl(N); 8501 EVT VT = N->getValueType(0); 8502 CC = N->getOperand(0); 8503 if (CC.getValueType() != MVT::i1) 8504 return false; 8505 Invert = !AllOnes; 8506 if (AllOnes) 8507 // When looking for an AllOnes constant, N is an sext, and the 'other' 8508 // value is 0. 8509 OtherOp = DAG.getConstant(0, dl, VT); 8510 else if (N->getOpcode() == ISD::ZERO_EXTEND) 8511 // When looking for a 0 constant, N can be zext or sext. 8512 OtherOp = DAG.getConstant(1, dl, VT); 8513 else 8514 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, 8515 VT); 8516 return true; 8517 } 8518 } 8519 } 8520 8521 // Combine a constant select operand into its use: 8522 // 8523 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 8524 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 8525 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 8526 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 8527 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 8528 // 8529 // The transform is rejected if the select doesn't have a constant operand that 8530 // is null, or all ones when AllOnes is set. 8531 // 8532 // Also recognize sext/zext from i1: 8533 // 8534 // (add (zext cc), x) -> (select cc (add x, 1), x) 8535 // (add (sext cc), x) -> (select cc (add x, -1), x) 8536 // 8537 // These transformations eventually create predicated instructions. 8538 // 8539 // @param N The node to transform. 8540 // @param Slct The N operand that is a select. 8541 // @param OtherOp The other N operand (x above). 8542 // @param DCI Context. 8543 // @param AllOnes Require the select constant to be all ones instead of null. 8544 // @returns The new node, or SDValue() on failure. 8545 static 8546 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 8547 TargetLowering::DAGCombinerInfo &DCI, 8548 bool AllOnes = false) { 8549 SelectionDAG &DAG = DCI.DAG; 8550 EVT VT = N->getValueType(0); 8551 SDValue NonConstantVal; 8552 SDValue CCOp; 8553 bool SwapSelectOps; 8554 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 8555 NonConstantVal, DAG)) 8556 return SDValue(); 8557 8558 // Slct is now know to be the desired identity constant when CC is true. 8559 SDValue TrueVal = OtherOp; 8560 SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 8561 OtherOp, NonConstantVal); 8562 // Unless SwapSelectOps says CC should be false. 8563 if (SwapSelectOps) 8564 std::swap(TrueVal, FalseVal); 8565 8566 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, 8567 CCOp, TrueVal, FalseVal); 8568 } 8569 8570 // Attempt combineSelectAndUse on each operand of a commutative operator N. 8571 static 8572 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, 8573 TargetLowering::DAGCombinerInfo &DCI) { 8574 SDValue N0 = N->getOperand(0); 8575 SDValue N1 = N->getOperand(1); 8576 if (N0.getNode()->hasOneUse()) 8577 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes)) 8578 return Result; 8579 if (N1.getNode()->hasOneUse()) 8580 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes)) 8581 return Result; 8582 return SDValue(); 8583 } 8584 8585 // AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction 8586 // (only after legalization). 
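// The shape being matched is, roughly:
//   (add (build_vector (extractelt V, 0), (extractelt V, 2), ...),
//        (build_vector (extractelt V, 1), (extractelt V, 3), ...))
// i.e. a pair-wise sum of neighbouring lanes of a single source vector V,
// which is what vpaddl computes directly (a sketch only; the precise
// legality checks live in the function below).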
8587 static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
8588                                   TargetLowering::DAGCombinerInfo &DCI,
8589                                   const ARMSubtarget *Subtarget) {
8590
8591   // Only perform the optimization after legalize, and only if NEON is
8592   // available. We also expect both operands to be BUILD_VECTORs.
8593   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
8594       || N0.getOpcode() != ISD::BUILD_VECTOR
8595       || N1.getOpcode() != ISD::BUILD_VECTOR)
8596     return SDValue();
8597
8598   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
8599   EVT VT = N->getValueType(0);
8600   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
8601     return SDValue();
8602
8603   // Check that the vector operands are of the right form.
8604   // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR operands, where N
8605   // is the size of the formed vector.
8606   // Each EXTRACT_VECTOR should have the same input vector and an odd or even
8607   // index such that we have a pair-wise add pattern.
8608
8609   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
8610   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8611     return SDValue();
8612   SDValue Vec = N0->getOperand(0)->getOperand(0);
8613   SDNode *V = Vec.getNode();
8614   unsigned nextIndex = 0;
8615
8616   // For each operand of the ADD that is a BUILD_VECTOR, check that each of
8617   // its operands is an EXTRACT_VECTOR with the same input vector and the
8618   // appropriate index.
8619   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
8620     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
8621         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
8622
8623       SDValue ExtVec0 = N0->getOperand(i);
8624       SDValue ExtVec1 = N1->getOperand(i);
8625
8626       // First operand is the vector, verify it's the same.
8627       if (V != ExtVec0->getOperand(0).getNode() ||
8628           V != ExtVec1->getOperand(0).getNode())
8629         return SDValue();
8630
8631       // Second is the constant, verify it's correct.
8632       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
8633       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
8634
8635       // For the constants, we want to see all the even or all the odd indices.
8636       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
8637           || C1->getZExtValue() != nextIndex+1)
8638         return SDValue();
8639
8640       // Increment index.
8641       nextIndex+=2;
8642     } else
8643       return SDValue();
8644   }
8645
8646   // Create VPADDL node.
8647   SelectionDAG &DAG = DCI.DAG;
8648   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8649
8650   SDLoc dl(N);
8651
8652   // Build operand list.
8653   SmallVector<SDValue, 8> Ops;
8654   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
8655                                 TLI.getPointerTy(DAG.getDataLayout())));
8656
8657   // Input is the vector.
8658   Ops.push_back(Vec);
8659
8660   // Get widened type and narrowed type.
8661 MVT widenType; 8662 unsigned numElem = VT.getVectorNumElements(); 8663 8664 EVT inputLaneType = Vec.getValueType().getVectorElementType(); 8665 switch (inputLaneType.getSimpleVT().SimpleTy) { 8666 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 8667 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 8668 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 8669 default: 8670 llvm_unreachable("Invalid vector element type for padd optimization."); 8671 } 8672 8673 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops); 8674 unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE; 8675 return DAG.getNode(ExtOp, dl, VT, tmp); 8676 } 8677 8678 static SDValue findMUL_LOHI(SDValue V) { 8679 if (V->getOpcode() == ISD::UMUL_LOHI || 8680 V->getOpcode() == ISD::SMUL_LOHI) 8681 return V; 8682 return SDValue(); 8683 } 8684 8685 static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, 8686 TargetLowering::DAGCombinerInfo &DCI, 8687 const ARMSubtarget *Subtarget) { 8688 8689 if (Subtarget->isThumb1Only()) return SDValue(); 8690 8691 // Only perform the checks after legalize when the pattern is available. 8692 if (DCI.isBeforeLegalize()) return SDValue(); 8693 8694 // Look for multiply add opportunities. 8695 // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where 8696 // each add nodes consumes a value from ISD::UMUL_LOHI and there is 8697 // a glue link from the first add to the second add. 8698 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by 8699 // a S/UMLAL instruction. 8700 // UMUL_LOHI 8701 // / :lo \ :hi 8702 // / \ [no multiline comment] 8703 // loAdd -> ADDE | 8704 // \ :glue / 8705 // \ / 8706 // ADDC <- hiAdd 8707 // 8708 assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC"); 8709 SDValue AddcOp0 = AddcNode->getOperand(0); 8710 SDValue AddcOp1 = AddcNode->getOperand(1); 8711 8712 // Check if the two operands are from the same mul_lohi node. 8713 if (AddcOp0.getNode() == AddcOp1.getNode()) 8714 return SDValue(); 8715 8716 assert(AddcNode->getNumValues() == 2 && 8717 AddcNode->getValueType(0) == MVT::i32 && 8718 "Expect ADDC with two result values. First: i32"); 8719 8720 // Check that we have a glued ADDC node. 8721 if (AddcNode->getValueType(1) != MVT::Glue) 8722 return SDValue(); 8723 8724 // Check that the ADDC adds the low result of the S/UMUL_LOHI. 8725 if (AddcOp0->getOpcode() != ISD::UMUL_LOHI && 8726 AddcOp0->getOpcode() != ISD::SMUL_LOHI && 8727 AddcOp1->getOpcode() != ISD::UMUL_LOHI && 8728 AddcOp1->getOpcode() != ISD::SMUL_LOHI) 8729 return SDValue(); 8730 8731 // Look for the glued ADDE. 8732 SDNode* AddeNode = AddcNode->getGluedUser(); 8733 if (!AddeNode) 8734 return SDValue(); 8735 8736 // Make sure it is really an ADDE. 8737 if (AddeNode->getOpcode() != ISD::ADDE) 8738 return SDValue(); 8739 8740 assert(AddeNode->getNumOperands() == 3 && 8741 AddeNode->getOperand(2).getValueType() == MVT::Glue && 8742 "ADDE node has the wrong inputs"); 8743 8744 // Check for the triangle shape. 8745 SDValue AddeOp0 = AddeNode->getOperand(0); 8746 SDValue AddeOp1 = AddeNode->getOperand(1); 8747 8748 // Make sure that the ADDE operands are not coming from the same node. 8749 if (AddeOp0.getNode() == AddeOp1.getNode()) 8750 return SDValue(); 8751 8752 // Find the MUL_LOHI node walking up ADDE's operands. 
8753 bool IsLeftOperandMUL = false; 8754 SDValue MULOp = findMUL_LOHI(AddeOp0); 8755 if (MULOp == SDValue()) 8756 MULOp = findMUL_LOHI(AddeOp1); 8757 else 8758 IsLeftOperandMUL = true; 8759 if (MULOp == SDValue()) 8760 return SDValue(); 8761 8762 // Figure out the right opcode. 8763 unsigned Opc = MULOp->getOpcode(); 8764 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; 8765 8766 // Figure out the high and low input values to the MLAL node. 8767 SDValue* HiAdd = nullptr; 8768 SDValue* LoMul = nullptr; 8769 SDValue* LowAdd = nullptr; 8770 8771 // Ensure that ADDE is from high result of ISD::SMUL_LOHI. 8772 if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1))) 8773 return SDValue(); 8774 8775 if (IsLeftOperandMUL) 8776 HiAdd = &AddeOp1; 8777 else 8778 HiAdd = &AddeOp0; 8779 8780 8781 // Ensure that LoMul and LowAdd are taken from correct ISD::SMUL_LOHI node 8782 // whose low result is fed to the ADDC we are checking. 8783 8784 if (AddcOp0 == MULOp.getValue(0)) { 8785 LoMul = &AddcOp0; 8786 LowAdd = &AddcOp1; 8787 } 8788 if (AddcOp1 == MULOp.getValue(0)) { 8789 LoMul = &AddcOp1; 8790 LowAdd = &AddcOp0; 8791 } 8792 8793 if (!LoMul) 8794 return SDValue(); 8795 8796 // Create the merged node. 8797 SelectionDAG &DAG = DCI.DAG; 8798 8799 // Build operand list. 8800 SmallVector<SDValue, 8> Ops; 8801 Ops.push_back(LoMul->getOperand(0)); 8802 Ops.push_back(LoMul->getOperand(1)); 8803 Ops.push_back(*LowAdd); 8804 Ops.push_back(*HiAdd); 8805 8806 SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode), 8807 DAG.getVTList(MVT::i32, MVT::i32), Ops); 8808 8809 // Replace the ADDs' nodes uses by the MLA node's values. 8810 SDValue HiMLALResult(MLALNode.getNode(), 1); 8811 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); 8812 8813 SDValue LoMLALResult(MLALNode.getNode(), 0); 8814 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); 8815 8816 // Return original node to notify the driver to stop replacing. 8817 SDValue resNode(AddcNode, 0); 8818 return resNode; 8819 } 8820 8821 /// PerformADDCCombine - Target-specific dag combine transform from 8822 /// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL. 8823 static SDValue PerformADDCCombine(SDNode *N, 8824 TargetLowering::DAGCombinerInfo &DCI, 8825 const ARMSubtarget *Subtarget) { 8826 8827 return AddCombineTo64bitMLAL(N, DCI, Subtarget); 8828 8829 } 8830 8831 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 8832 /// operands N0 and N1. This is a helper for PerformADDCombine that is 8833 /// called with the default operands, and if that fails, with commuted 8834 /// operands. 8835 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 8836 TargetLowering::DAGCombinerInfo &DCI, 8837 const ARMSubtarget *Subtarget){ 8838 8839 // Attempt to create vpaddl for this add. 8840 if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget)) 8841 return Result; 8842 8843 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 8844 if (N0.getNode()->hasOneUse()) 8845 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) 8846 return Result; 8847 return SDValue(); 8848 } 8849 8850 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 8851 /// 8852 static SDValue PerformADDCombine(SDNode *N, 8853 TargetLowering::DAGCombinerInfo &DCI, 8854 const ARMSubtarget *Subtarget) { 8855 SDValue N0 = N->getOperand(0); 8856 SDValue N1 = N->getOperand(1); 8857 8858 // First try with the default operand order. 
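  // The helpers above are not symmetric in N0 and N1, so if this attempt
  // fails it is retried below with the operands commuted.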
8859 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) 8860 return Result; 8861 8862 // If that didn't work, try again with the operands commuted. 8863 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 8864 } 8865 8866 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 8867 /// 8868 static SDValue PerformSUBCombine(SDNode *N, 8869 TargetLowering::DAGCombinerInfo &DCI) { 8870 SDValue N0 = N->getOperand(0); 8871 SDValue N1 = N->getOperand(1); 8872 8873 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 8874 if (N1.getNode()->hasOneUse()) 8875 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) 8876 return Result; 8877 8878 return SDValue(); 8879 } 8880 8881 /// PerformVMULCombine 8882 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 8883 /// special multiplier accumulator forwarding. 8884 /// vmul d3, d0, d2 8885 /// vmla d3, d1, d2 8886 /// is faster than 8887 /// vadd d3, d0, d1 8888 /// vmul d3, d3, d2 8889 // However, for (A + B) * (A + B), 8890 // vadd d2, d0, d1 8891 // vmul d3, d0, d2 8892 // vmla d3, d1, d2 8893 // is slower than 8894 // vadd d2, d0, d1 8895 // vmul d3, d2, d2 8896 static SDValue PerformVMULCombine(SDNode *N, 8897 TargetLowering::DAGCombinerInfo &DCI, 8898 const ARMSubtarget *Subtarget) { 8899 if (!Subtarget->hasVMLxForwarding()) 8900 return SDValue(); 8901 8902 SelectionDAG &DAG = DCI.DAG; 8903 SDValue N0 = N->getOperand(0); 8904 SDValue N1 = N->getOperand(1); 8905 unsigned Opcode = N0.getOpcode(); 8906 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 8907 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 8908 Opcode = N1.getOpcode(); 8909 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 8910 Opcode != ISD::FADD && Opcode != ISD::FSUB) 8911 return SDValue(); 8912 std::swap(N0, N1); 8913 } 8914 8915 if (N0 == N1) 8916 return SDValue(); 8917 8918 EVT VT = N->getValueType(0); 8919 SDLoc DL(N); 8920 SDValue N00 = N0->getOperand(0); 8921 SDValue N01 = N0->getOperand(1); 8922 return DAG.getNode(Opcode, DL, VT, 8923 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 8924 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 8925 } 8926 8927 static SDValue PerformMULCombine(SDNode *N, 8928 TargetLowering::DAGCombinerInfo &DCI, 8929 const ARMSubtarget *Subtarget) { 8930 SelectionDAG &DAG = DCI.DAG; 8931 8932 if (Subtarget->isThumb1Only()) 8933 return SDValue(); 8934 8935 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 8936 return SDValue(); 8937 8938 EVT VT = N->getValueType(0); 8939 if (VT.is64BitVector() || VT.is128BitVector()) 8940 return PerformVMULCombine(N, DCI, Subtarget); 8941 if (VT != MVT::i32) 8942 return SDValue(); 8943 8944 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 8945 if (!C) 8946 return SDValue(); 8947 8948 int64_t MulAmt = C->getSExtValue(); 8949 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); 8950 8951 ShiftAmt = ShiftAmt & (32 - 1); 8952 SDValue V = N->getOperand(0); 8953 SDLoc DL(N); 8954 8955 SDValue Res; 8956 MulAmt >>= ShiftAmt; 8957 8958 if (MulAmt >= 0) { 8959 if (isPowerOf2_32(MulAmt - 1)) { 8960 // (mul x, 2^N + 1) => (add (shl x, N), x) 8961 Res = DAG.getNode(ISD::ADD, DL, VT, 8962 V, 8963 DAG.getNode(ISD::SHL, DL, VT, 8964 V, 8965 DAG.getConstant(Log2_32(MulAmt - 1), DL, 8966 MVT::i32))); 8967 } else if (isPowerOf2_32(MulAmt + 1)) { 8968 // (mul x, 2^N - 1) => (sub (shl x, N), x) 8969 Res = DAG.getNode(ISD::SUB, DL, VT, 8970 DAG.getNode(ISD::SHL, DL, VT, 8971 V, 8972 DAG.getConstant(Log2_32(MulAmt + 1), DL, 8973 MVT::i32)), 
8974 V); 8975 } else 8976 return SDValue(); 8977 } else { 8978 uint64_t MulAmtAbs = -MulAmt; 8979 if (isPowerOf2_32(MulAmtAbs + 1)) { 8980 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 8981 Res = DAG.getNode(ISD::SUB, DL, VT, 8982 V, 8983 DAG.getNode(ISD::SHL, DL, VT, 8984 V, 8985 DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, 8986 MVT::i32))); 8987 } else if (isPowerOf2_32(MulAmtAbs - 1)) { 8988 // (mul x, -(2^N + 1)) => - (add (shl x, N), x) 8989 Res = DAG.getNode(ISD::ADD, DL, VT, 8990 V, 8991 DAG.getNode(ISD::SHL, DL, VT, 8992 V, 8993 DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, 8994 MVT::i32))); 8995 Res = DAG.getNode(ISD::SUB, DL, VT, 8996 DAG.getConstant(0, DL, MVT::i32), Res); 8997 8998 } else 8999 return SDValue(); 9000 } 9001 9002 if (ShiftAmt != 0) 9003 Res = DAG.getNode(ISD::SHL, DL, VT, 9004 Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); 9005 9006 // Do not add new nodes to DAG combiner worklist. 9007 DCI.CombineTo(N, Res, false); 9008 return SDValue(); 9009 } 9010 9011 static SDValue PerformANDCombine(SDNode *N, 9012 TargetLowering::DAGCombinerInfo &DCI, 9013 const ARMSubtarget *Subtarget) { 9014 9015 // Attempt to use immediate-form VBIC 9016 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 9017 SDLoc dl(N); 9018 EVT VT = N->getValueType(0); 9019 SelectionDAG &DAG = DCI.DAG; 9020 9021 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9022 return SDValue(); 9023 9024 APInt SplatBits, SplatUndef; 9025 unsigned SplatBitSize; 9026 bool HasAnyUndefs; 9027 if (BVN && 9028 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 9029 if (SplatBitSize <= 64) { 9030 EVT VbicVT; 9031 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 9032 SplatUndef.getZExtValue(), SplatBitSize, 9033 DAG, dl, VbicVT, VT.is128BitVector(), 9034 OtherModImm); 9035 if (Val.getNode()) { 9036 SDValue Input = 9037 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 9038 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 9039 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 9040 } 9041 } 9042 } 9043 9044 if (!Subtarget->isThumb1Only()) { 9045 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) 9046 if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) 9047 return Result; 9048 } 9049 9050 return SDValue(); 9051 } 9052 9053 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR 9054 static SDValue PerformORCombine(SDNode *N, 9055 TargetLowering::DAGCombinerInfo &DCI, 9056 const ARMSubtarget *Subtarget) { 9057 // Attempt to use immediate-form VORR 9058 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 9059 SDLoc dl(N); 9060 EVT VT = N->getValueType(0); 9061 SelectionDAG &DAG = DCI.DAG; 9062 9063 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9064 return SDValue(); 9065 9066 APInt SplatBits, SplatUndef; 9067 unsigned SplatBitSize; 9068 bool HasAnyUndefs; 9069 if (BVN && Subtarget->hasNEON() && 9070 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 9071 if (SplatBitSize <= 64) { 9072 EVT VorrVT; 9073 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 9074 SplatUndef.getZExtValue(), SplatBitSize, 9075 DAG, dl, VorrVT, VT.is128BitVector(), 9076 OtherModImm); 9077 if (Val.getNode()) { 9078 SDValue Input = 9079 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 9080 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 9081 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 9082 } 9083 } 9084 } 9085 9086 if (!Subtarget->isThumb1Only()) 
{ 9087 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 9088 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 9089 return Result; 9090 } 9091 9092 // The code below optimizes (or (and X, Y), Z). 9093 // The AND operand needs to have a single user to make these optimizations 9094 // profitable. 9095 SDValue N0 = N->getOperand(0); 9096 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 9097 return SDValue(); 9098 SDValue N1 = N->getOperand(1); 9099 9100 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 9101 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 9102 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 9103 APInt SplatUndef; 9104 unsigned SplatBitSize; 9105 bool HasAnyUndefs; 9106 9107 APInt SplatBits0, SplatBits1; 9108 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 9109 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 9110 // Ensure that the second operand of both ands are constants 9111 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 9112 HasAnyUndefs) && !HasAnyUndefs) { 9113 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 9114 HasAnyUndefs) && !HasAnyUndefs) { 9115 // Ensure that the bit width of the constants are the same and that 9116 // the splat arguments are logical inverses as per the pattern we 9117 // are trying to simplify. 9118 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && 9119 SplatBits0 == ~SplatBits1) { 9120 // Canonicalize the vector type to make instruction selection 9121 // simpler. 9122 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 9123 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 9124 N0->getOperand(1), 9125 N0->getOperand(0), 9126 N1->getOperand(0)); 9127 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9128 } 9129 } 9130 } 9131 } 9132 9133 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 9134 // reasonable. 9135 9136 // BFI is only available on V6T2+ 9137 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 9138 return SDValue(); 9139 9140 SDLoc DL(N); 9141 // 1) or (and A, mask), val => ARMbfi A, val, mask 9142 // iff (val & mask) == val 9143 // 9144 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 9145 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 9146 // && mask == ~mask2 9147 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 9148 // && ~mask == mask2 9149 // (i.e., copy a bitfield value into another bitfield of the same width) 9150 9151 if (VT != MVT::i32) 9152 return SDValue(); 9153 9154 SDValue N00 = N0.getOperand(0); 9155 9156 // The value and the mask need to be constants so we can verify this is 9157 // actually a bitfield set. If the mask is 0xffff, we can do better 9158 // via a movt instruction, so don't use BFI in that case. 
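  // A small worked example of case (1), for illustration only:
  //   (or (and A, 0xffff00ff), 0x00003400)
  // has mask == 0xffff00ff and val == 0x3400, with (val & ~mask) == val, so it
  // becomes a BFI that inserts the value 0x34 into bits [15:8] of A.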
9159 SDValue MaskOp = N0.getOperand(1); 9160 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 9161 if (!MaskC) 9162 return SDValue(); 9163 unsigned Mask = MaskC->getZExtValue(); 9164 if (Mask == 0xffff) 9165 return SDValue(); 9166 SDValue Res; 9167 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 9168 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 9169 if (N1C) { 9170 unsigned Val = N1C->getZExtValue(); 9171 if ((Val & ~Mask) != Val) 9172 return SDValue(); 9173 9174 if (ARM::isBitFieldInvertedMask(Mask)) { 9175 Val >>= countTrailingZeros(~Mask); 9176 9177 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 9178 DAG.getConstant(Val, DL, MVT::i32), 9179 DAG.getConstant(Mask, DL, MVT::i32)); 9180 9181 // Do not add new nodes to DAG combiner worklist. 9182 DCI.CombineTo(N, Res, false); 9183 return SDValue(); 9184 } 9185 } else if (N1.getOpcode() == ISD::AND) { 9186 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 9187 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 9188 if (!N11C) 9189 return SDValue(); 9190 unsigned Mask2 = N11C->getZExtValue(); 9191 9192 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 9193 // as is to match. 9194 if (ARM::isBitFieldInvertedMask(Mask) && 9195 (Mask == ~Mask2)) { 9196 // The pack halfword instruction works better for masks that fit it, 9197 // so use that when it's available. 9198 if (Subtarget->hasT2ExtractPack() && 9199 (Mask == 0xffff || Mask == 0xffff0000)) 9200 return SDValue(); 9201 // 2a 9202 unsigned amt = countTrailingZeros(Mask2); 9203 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 9204 DAG.getConstant(amt, DL, MVT::i32)); 9205 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 9206 DAG.getConstant(Mask, DL, MVT::i32)); 9207 // Do not add new nodes to DAG combiner worklist. 9208 DCI.CombineTo(N, Res, false); 9209 return SDValue(); 9210 } else if (ARM::isBitFieldInvertedMask(~Mask) && 9211 (~Mask == Mask2)) { 9212 // The pack halfword instruction works better for masks that fit it, 9213 // so use that when it's available. 9214 if (Subtarget->hasT2ExtractPack() && 9215 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 9216 return SDValue(); 9217 // 2b 9218 unsigned lsb = countTrailingZeros(Mask); 9219 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 9220 DAG.getConstant(lsb, DL, MVT::i32)); 9221 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 9222 DAG.getConstant(Mask2, DL, MVT::i32)); 9223 // Do not add new nodes to DAG combiner worklist. 9224 DCI.CombineTo(N, Res, false); 9225 return SDValue(); 9226 } 9227 } 9228 9229 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 9230 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 9231 ARM::isBitFieldInvertedMask(~Mask)) { 9232 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 9233 // where lsb(mask) == #shamt and masked bits of B are known zero. 9234 SDValue ShAmt = N00.getOperand(1); 9235 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 9236 unsigned LSB = countTrailingZeros(Mask); 9237 if (ShAmtC != LSB) 9238 return SDValue(); 9239 9240 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 9241 DAG.getConstant(~Mask, DL, MVT::i32)); 9242 9243 // Do not add new nodes to DAG combiner worklist. 
9244 DCI.CombineTo(N, Res, false); 9245 } 9246 9247 return SDValue(); 9248 } 9249 9250 static SDValue PerformXORCombine(SDNode *N, 9251 TargetLowering::DAGCombinerInfo &DCI, 9252 const ARMSubtarget *Subtarget) { 9253 EVT VT = N->getValueType(0); 9254 SelectionDAG &DAG = DCI.DAG; 9255 9256 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9257 return SDValue(); 9258 9259 if (!Subtarget->isThumb1Only()) { 9260 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 9261 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 9262 return Result; 9263 } 9264 9265 return SDValue(); 9266 } 9267 9268 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it, 9269 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and 9270 // their position in "to" (Rd). 9271 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { 9272 assert(N->getOpcode() == ARMISD::BFI); 9273 9274 SDValue From = N->getOperand(1); 9275 ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); 9276 FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation()); 9277 9278 // If the Base came from a SHR #C, we can deduce that it is really testing bit 9279 // #C in the base of the SHR. 9280 if (From->getOpcode() == ISD::SRL && 9281 isa<ConstantSDNode>(From->getOperand(1))) { 9282 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); 9283 assert(Shift.getLimitedValue() < 32 && "Shift too large!"); 9284 FromMask <<= Shift.getLimitedValue(31); 9285 From = From->getOperand(0); 9286 } 9287 9288 return From; 9289 } 9290 9291 // If A and B contain one contiguous set of bits, does A | B == A . B? 9292 // 9293 // Neither A nor B must be zero. 9294 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { 9295 unsigned LastActiveBitInA = A.countTrailingZeros(); 9296 unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; 9297 return LastActiveBitInA - 1 == FirstActiveBitInB; 9298 } 9299 9300 static SDValue FindBFIToCombineWith(SDNode *N) { 9301 // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with, 9302 // if one exists. 9303 APInt ToMask, FromMask; 9304 SDValue From = ParseBFI(N, ToMask, FromMask); 9305 SDValue To = N->getOperand(0); 9306 9307 // Now check for a compatible BFI to merge with. We can pass through BFIs that 9308 // aren't compatible, but not if they set the same bit in their destination as 9309 // we do (or that of any BFI we're going to combine with). 9310 SDValue V = To; 9311 APInt CombinedToMask = ToMask; 9312 while (V.getOpcode() == ARMISD::BFI) { 9313 APInt NewToMask, NewFromMask; 9314 SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); 9315 if (NewFrom != From) { 9316 // This BFI has a different base. Keep going. 9317 CombinedToMask |= NewToMask; 9318 V = V.getOperand(0); 9319 continue; 9320 } 9321 9322 // Do the written bits conflict with any we've seen so far? 9323 if ((NewToMask & CombinedToMask).getBoolValue()) 9324 // Conflicting bits - bail out because going further is unsafe. 9325 return SDValue(); 9326 9327 // Are the new bits contiguous when combined with the old bits? 9328 if (BitsProperlyConcatenate(ToMask, NewToMask) && 9329 BitsProperlyConcatenate(FromMask, NewFromMask)) 9330 return V; 9331 if (BitsProperlyConcatenate(NewToMask, ToMask) && 9332 BitsProperlyConcatenate(NewFromMask, FromMask)) 9333 return V; 9334 9335 // We've seen a write to some bits, so track it. 
9336 CombinedToMask |= NewToMask; 9337 // Keep going... 9338 V = V.getOperand(0); 9339 } 9340 9341 return SDValue(); 9342 } 9343 9344 static SDValue PerformBFICombine(SDNode *N, 9345 TargetLowering::DAGCombinerInfo &DCI) { 9346 SDValue N1 = N->getOperand(1); 9347 if (N1.getOpcode() == ISD::AND) { 9348 // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 9349 // the bits being cleared by the AND are not demanded by the BFI. 9350 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 9351 if (!N11C) 9352 return SDValue(); 9353 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 9354 unsigned LSB = countTrailingZeros(~InvMask); 9355 unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; 9356 assert(Width < 9357 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && 9358 "undefined behavior"); 9359 unsigned Mask = (1u << Width) - 1; 9360 unsigned Mask2 = N11C->getZExtValue(); 9361 if ((Mask & (~Mask2)) == 0) 9362 return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), 9363 N->getOperand(0), N1.getOperand(0), 9364 N->getOperand(2)); 9365 } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) { 9366 // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes. 9367 // Keep track of any consecutive bits set that all come from the same base 9368 // value. We can combine these together into a single BFI. 9369 SDValue CombineBFI = FindBFIToCombineWith(N); 9370 if (CombineBFI == SDValue()) 9371 return SDValue(); 9372 9373 // We've found a BFI. 9374 APInt ToMask1, FromMask1; 9375 SDValue From1 = ParseBFI(N, ToMask1, FromMask1); 9376 9377 APInt ToMask2, FromMask2; 9378 SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); 9379 assert(From1 == From2); 9380 (void)From2; 9381 9382 // First, unlink CombineBFI. 9383 DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0)); 9384 // Then create a new BFI, combining the two together. 9385 APInt NewFromMask = FromMask1 | FromMask2; 9386 APInt NewToMask = ToMask1 | ToMask2; 9387 9388 EVT VT = N->getValueType(0); 9389 SDLoc dl(N); 9390 9391 if (NewFromMask[0] == 0) 9392 From1 = DCI.DAG.getNode( 9393 ISD::SRL, dl, VT, From1, 9394 DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); 9395 return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1, 9396 DCI.DAG.getConstant(~NewToMask, dl, VT)); 9397 } 9398 return SDValue(); 9399 } 9400 9401 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for 9402 /// ARMISD::VMOVRRD. 9403 static SDValue PerformVMOVRRDCombine(SDNode *N, 9404 TargetLowering::DAGCombinerInfo &DCI, 9405 const ARMSubtarget *Subtarget) { 9406 // vmovrrd(vmovdrr x, y) -> x,y 9407 SDValue InDouble = N->getOperand(0); 9408 if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP()) 9409 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 9410 9411 // vmovrrd(load f64) -> (load i32), (load i32) 9412 SDNode *InNode = InDouble.getNode(); 9413 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 9414 InNode->getValueType(0) == MVT::f64 && 9415 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 9416 !cast<LoadSDNode>(InNode)->isVolatile()) { 9417 // TODO: Should this be done for non-FrameIndex operands? 
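  // A rough sketch of the intended effect (not verified output): instead of
  //   vldr d16, [fp, #-8]
  //   vmov r0, r1, d16
  // the f64 stack slot is reloaded with two plain integer loads
  //   ldr r0, [fp, #-8]
  //   ldr r1, [fp, #-4]
  // so the value never has to round-trip through a VFP register.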
9418 LoadSDNode *LD = cast<LoadSDNode>(InNode); 9419 9420 SelectionDAG &DAG = DCI.DAG; 9421 SDLoc DL(LD); 9422 SDValue BasePtr = LD->getBasePtr(); 9423 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 9424 LD->getPointerInfo(), LD->isVolatile(), 9425 LD->isNonTemporal(), LD->isInvariant(), 9426 LD->getAlignment()); 9427 9428 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 9429 DAG.getConstant(4, DL, MVT::i32)); 9430 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 9431 LD->getPointerInfo(), LD->isVolatile(), 9432 LD->isNonTemporal(), LD->isInvariant(), 9433 std::min(4U, LD->getAlignment() / 2)); 9434 9435 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 9436 if (DCI.DAG.getDataLayout().isBigEndian()) 9437 std::swap (NewLD1, NewLD2); 9438 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 9439 return Result; 9440 } 9441 9442 return SDValue(); 9443 } 9444 9445 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for 9446 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 9447 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 9448 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 9449 SDValue Op0 = N->getOperand(0); 9450 SDValue Op1 = N->getOperand(1); 9451 if (Op0.getOpcode() == ISD::BITCAST) 9452 Op0 = Op0.getOperand(0); 9453 if (Op1.getOpcode() == ISD::BITCAST) 9454 Op1 = Op1.getOperand(0); 9455 if (Op0.getOpcode() == ARMISD::VMOVRRD && 9456 Op0.getNode() == Op1.getNode() && 9457 Op0.getResNo() == 0 && Op1.getResNo() == 1) 9458 return DAG.getNode(ISD::BITCAST, SDLoc(N), 9459 N->getValueType(0), Op0.getOperand(0)); 9460 return SDValue(); 9461 } 9462 9463 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 9464 /// are normal, non-volatile loads. If so, it is profitable to bitcast an 9465 /// i64 vector to have f64 elements, since the value can then be loaded 9466 /// directly into a VFP register. 9467 static bool hasNormalLoadOperand(SDNode *N) { 9468 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 9469 for (unsigned i = 0; i < NumElts; ++i) { 9470 SDNode *Elt = N->getOperand(i).getNode(); 9471 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 9472 return true; 9473 } 9474 return false; 9475 } 9476 9477 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 9478 /// ISD::BUILD_VECTOR. 9479 static SDValue PerformBUILD_VECTORCombine(SDNode *N, 9480 TargetLowering::DAGCombinerInfo &DCI, 9481 const ARMSubtarget *Subtarget) { 9482 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 9483 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 9484 // into a pair of GPRs, which is fine when the value is used as a scalar, 9485 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 9486 SelectionDAG &DAG = DCI.DAG; 9487 if (N->getNumOperands() == 2) 9488 if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) 9489 return RV; 9490 9491 // Load i64 elements as f64 values so that type legalization does not split 9492 // them up into i32 values. 
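  // E.g. a build_vector of two i64 loads becomes a v2f64 build_vector whose
  // elements are bitcast from the loads; the DAG combiner can then fold each
  // bitcast into an f64 load (vldr), keeping every element in a single
  // D register instead of a GPR pair, before the result is bitcast back below.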
9493 EVT VT = N->getValueType(0); 9494 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 9495 return SDValue(); 9496 SDLoc dl(N); 9497 SmallVector<SDValue, 8> Ops; 9498 unsigned NumElts = VT.getVectorNumElements(); 9499 for (unsigned i = 0; i < NumElts; ++i) { 9500 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 9501 Ops.push_back(V); 9502 // Make the DAGCombiner fold the bitcast. 9503 DCI.AddToWorklist(V.getNode()); 9504 } 9505 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 9506 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); 9507 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 9508 } 9509 9510 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 9511 static SDValue 9512 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 9513 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. 9514 // At that time, we may have inserted bitcasts from integer to float. 9515 // If these bitcasts have survived DAGCombine, change the lowering of this 9516 // BUILD_VECTOR in something more vector friendly, i.e., that does not 9517 // force to use floating point types. 9518 9519 // Make sure we can change the type of the vector. 9520 // This is possible iff: 9521 // 1. The vector is only used in a bitcast to a integer type. I.e., 9522 // 1.1. Vector is used only once. 9523 // 1.2. Use is a bit convert to an integer type. 9524 // 2. The size of its operands are 32-bits (64-bits are not legal). 9525 EVT VT = N->getValueType(0); 9526 EVT EltVT = VT.getVectorElementType(); 9527 9528 // Check 1.1. and 2. 9529 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) 9530 return SDValue(); 9531 9532 // By construction, the input type must be float. 9533 assert(EltVT == MVT::f32 && "Unexpected type!"); 9534 9535 // Check 1.2. 9536 SDNode *Use = *N->use_begin(); 9537 if (Use->getOpcode() != ISD::BITCAST || 9538 Use->getValueType(0).isFloatingPoint()) 9539 return SDValue(); 9540 9541 // Check profitability. 9542 // Model is, if more than half of the relevant operands are bitcast from 9543 // i32, turn the build_vector into a sequence of insert_vector_elt. 9544 // Relevant operands are everything that is not statically 9545 // (i.e., at compile time) bitcasted. 9546 unsigned NumOfBitCastedElts = 0; 9547 unsigned NumElts = VT.getVectorNumElements(); 9548 unsigned NumOfRelevantElts = NumElts; 9549 for (unsigned Idx = 0; Idx < NumElts; ++Idx) { 9550 SDValue Elt = N->getOperand(Idx); 9551 if (Elt->getOpcode() == ISD::BITCAST) { 9552 // Assume only bit cast to i32 will go away. 9553 if (Elt->getOperand(0).getValueType() == MVT::i32) 9554 ++NumOfBitCastedElts; 9555 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) 9556 // Constants are statically casted, thus do not count them as 9557 // relevant operands. 9558 --NumOfRelevantElts; 9559 } 9560 9561 // Check if more than half of the elements require a non-free bitcast. 9562 if (NumOfBitCastedElts <= NumOfRelevantElts / 2) 9563 return SDValue(); 9564 9565 SelectionDAG &DAG = DCI.DAG; 9566 // Create the new vector type. 9567 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 9568 // Check if the type is legal. 9569 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9570 if (!TLI.isTypeLegal(VecVT)) 9571 return SDValue(); 9572 9573 // Combine: 9574 // ARMISD::BUILD_VECTOR E1, E2, ..., EN. 9575 // => BITCAST INSERT_VECTOR_ELT 9576 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), 9577 // (BITCAST EN), N. 
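  // For example (a sketch with NumElts == 2 and f32 elements bitcast from
  // i32 values a and b):
  //   ARMISD::BUILD_VECTOR (bitcast a), (bitcast b)
  // becomes
  //   (bitcast (insert_vector_elt (insert_vector_elt undef:v2i32, a, 0), b, 1)
  //    to v2f32)
  // so the values stay in integer lanes and the float bitcasts disappear.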
9578 SDValue Vec = DAG.getUNDEF(VecVT); 9579 SDLoc dl(N); 9580 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { 9581 SDValue V = N->getOperand(Idx); 9582 if (V.isUndef()) 9583 continue; 9584 if (V.getOpcode() == ISD::BITCAST && 9585 V->getOperand(0).getValueType() == MVT::i32) 9586 // Fold obvious case. 9587 V = V.getOperand(0); 9588 else { 9589 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); 9590 // Make the DAGCombiner fold the bitcasts. 9591 DCI.AddToWorklist(V.getNode()); 9592 } 9593 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); 9594 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); 9595 } 9596 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); 9597 // Make the DAGCombiner fold the bitcasts. 9598 DCI.AddToWorklist(Vec.getNode()); 9599 return Vec; 9600 } 9601 9602 /// PerformInsertEltCombine - Target-specific dag combine xforms for 9603 /// ISD::INSERT_VECTOR_ELT. 9604 static SDValue PerformInsertEltCombine(SDNode *N, 9605 TargetLowering::DAGCombinerInfo &DCI) { 9606 // Bitcast an i64 load inserted into a vector to f64. 9607 // Otherwise, the i64 value will be legalized to a pair of i32 values. 9608 EVT VT = N->getValueType(0); 9609 SDNode *Elt = N->getOperand(1).getNode(); 9610 if (VT.getVectorElementType() != MVT::i64 || 9611 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 9612 return SDValue(); 9613 9614 SelectionDAG &DAG = DCI.DAG; 9615 SDLoc dl(N); 9616 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 9617 VT.getVectorNumElements()); 9618 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 9619 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 9620 // Make the DAGCombiner fold the bitcasts. 9621 DCI.AddToWorklist(Vec.getNode()); 9622 DCI.AddToWorklist(V.getNode()); 9623 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 9624 Vec, V, N->getOperand(2)); 9625 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 9626 } 9627 9628 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 9629 /// ISD::VECTOR_SHUFFLE. 9630 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 9631 // The LLVM shufflevector instruction does not require the shuffle mask 9632 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 9633 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 9634 // operands do not match the mask length, they are extended by concatenating 9635 // them with undef vectors. That is probably the right thing for other 9636 // targets, but for NEON it is better to concatenate two double-register 9637 // size vector operands into a single quad-register size vector. Do that 9638 // transformation here: 9639 // shuffle(concat(v1, undef), concat(v2, undef)) -> 9640 // shuffle(concat(v1, v2), undef) 9641 SDValue Op0 = N->getOperand(0); 9642 SDValue Op1 = N->getOperand(1); 9643 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 9644 Op1.getOpcode() != ISD::CONCAT_VECTORS || 9645 Op0.getNumOperands() != 2 || 9646 Op1.getNumOperands() != 2) 9647 return SDValue(); 9648 SDValue Concat0Op1 = Op0.getOperand(1); 9649 SDValue Concat1Op1 = Op1.getOperand(1); 9650 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) 9651 return SDValue(); 9652 // Skip the transformation if any of the types are illegal. 
9653 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9654 EVT VT = N->getValueType(0); 9655 if (!TLI.isTypeLegal(VT) || 9656 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 9657 !TLI.isTypeLegal(Concat1Op1.getValueType())) 9658 return SDValue(); 9659 9660 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, 9661 Op0.getOperand(0), Op1.getOperand(0)); 9662 // Translate the shuffle mask. 9663 SmallVector<int, 16> NewMask; 9664 unsigned NumElts = VT.getVectorNumElements(); 9665 unsigned HalfElts = NumElts/2; 9666 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 9667 for (unsigned n = 0; n < NumElts; ++n) { 9668 int MaskElt = SVN->getMaskElt(n); 9669 int NewElt = -1; 9670 if (MaskElt < (int)HalfElts) 9671 NewElt = MaskElt; 9672 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 9673 NewElt = HalfElts + MaskElt - NumElts; 9674 NewMask.push_back(NewElt); 9675 } 9676 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, 9677 DAG.getUNDEF(VT), NewMask.data()); 9678 } 9679 9680 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, 9681 /// NEON load/store intrinsics, and generic vector load/stores, to merge 9682 /// base address updates. 9683 /// For generic load/stores, the memory type is assumed to be a vector. 9684 /// The caller is assumed to have checked legality. 9685 static SDValue CombineBaseUpdate(SDNode *N, 9686 TargetLowering::DAGCombinerInfo &DCI) { 9687 SelectionDAG &DAG = DCI.DAG; 9688 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 9689 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 9690 const bool isStore = N->getOpcode() == ISD::STORE; 9691 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); 9692 SDValue Addr = N->getOperand(AddrOpIdx); 9693 MemSDNode *MemN = cast<MemSDNode>(N); 9694 SDLoc dl(N); 9695 9696 // Search for a use of the address operand that is an increment. 9697 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 9698 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 9699 SDNode *User = *UI; 9700 if (User->getOpcode() != ISD::ADD || 9701 UI.getUse().getResNo() != Addr.getResNo()) 9702 continue; 9703 9704 // Check that the add is independent of the load/store. Otherwise, folding 9705 // it would create a cycle. 9706 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 9707 continue; 9708 9709 // Find the new opcode for the updating load/store. 
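  // The goal, roughly, is to turn
  //   vld1.32 {d16}, [r0]
  //   add     r0, r0, #8
  // into the post-incremented form
  //   vld1.32 {d16}, [r0]!
  // (or the register-increment form when the step is not the access size).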
9710 bool isLoadOp = true; 9711 bool isLaneOp = false; 9712 unsigned NewOpc = 0; 9713 unsigned NumVecs = 0; 9714 if (isIntrinsic) { 9715 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9716 switch (IntNo) { 9717 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 9718 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 9719 NumVecs = 1; break; 9720 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 9721 NumVecs = 2; break; 9722 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 9723 NumVecs = 3; break; 9724 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 9725 NumVecs = 4; break; 9726 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 9727 NumVecs = 2; isLaneOp = true; break; 9728 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 9729 NumVecs = 3; isLaneOp = true; break; 9730 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 9731 NumVecs = 4; isLaneOp = true; break; 9732 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 9733 NumVecs = 1; isLoadOp = false; break; 9734 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 9735 NumVecs = 2; isLoadOp = false; break; 9736 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 9737 NumVecs = 3; isLoadOp = false; break; 9738 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 9739 NumVecs = 4; isLoadOp = false; break; 9740 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 9741 NumVecs = 2; isLoadOp = false; isLaneOp = true; break; 9742 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 9743 NumVecs = 3; isLoadOp = false; isLaneOp = true; break; 9744 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 9745 NumVecs = 4; isLoadOp = false; isLaneOp = true; break; 9746 } 9747 } else { 9748 isLaneOp = true; 9749 switch (N->getOpcode()) { 9750 default: llvm_unreachable("unexpected opcode for Neon base update"); 9751 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 9752 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 9753 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 9754 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; 9755 NumVecs = 1; isLaneOp = false; break; 9756 case ISD::STORE: NewOpc = ARMISD::VST1_UPD; 9757 NumVecs = 1; isLaneOp = false; isLoadOp = false; break; 9758 } 9759 } 9760 9761 // Find the size of memory referenced by the load/store. 9762 EVT VecTy; 9763 if (isLoadOp) { 9764 VecTy = N->getValueType(0); 9765 } else if (isIntrinsic) { 9766 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 9767 } else { 9768 assert(isStore && "Node has to be a load, a store, or an intrinsic!"); 9769 VecTy = N->getOperand(1).getValueType(); 9770 } 9771 9772 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 9773 if (isLaneOp) 9774 NumBytes /= VecTy.getVectorNumElements(); 9775 9776 // If the increment is a constant, it must match the memory ref size. 9777 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 9778 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 9779 uint64_t IncVal = CInc->getZExtValue(); 9780 if (IncVal != NumBytes) 9781 continue; 9782 } else if (NumBytes >= 3 * 16) { 9783 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 9784 // separate instructions that make it harder to use a non-constant update. 9785 continue; 9786 } 9787 9788 // OK, we found an ADD we can fold into the base update. 
9789 // Now, create a _UPD node, taking care of not breaking alignment. 9790 9791 EVT AlignedVecTy = VecTy; 9792 unsigned Alignment = MemN->getAlignment(); 9793 9794 // If this is a less-than-standard-aligned load/store, change the type to 9795 // match the standard alignment. 9796 // The alignment is overlooked when selecting _UPD variants; and it's 9797 // easier to introduce bitcasts here than fix that. 9798 // There are 3 ways to get to this base-update combine: 9799 // - intrinsics: they are assumed to be properly aligned (to the standard 9800 // alignment of the memory type), so we don't need to do anything. 9801 // - ARMISD::VLDx nodes: they are only generated from the aforementioned 9802 // intrinsics, so, likewise, there's nothing to do. 9803 // - generic load/store instructions: the alignment is specified as an 9804 // explicit operand, rather than implicitly as the standard alignment 9805 // of the memory type (like the intrisics). We need to change the 9806 // memory type to match the explicit alignment. That way, we don't 9807 // generate non-standard-aligned ARMISD::VLDx nodes. 9808 if (isa<LSBaseSDNode>(N)) { 9809 if (Alignment == 0) 9810 Alignment = 1; 9811 if (Alignment < VecTy.getScalarSizeInBits() / 8) { 9812 MVT EltTy = MVT::getIntegerVT(Alignment * 8); 9813 assert(NumVecs == 1 && "Unexpected multi-element generic load/store."); 9814 assert(!isLaneOp && "Unexpected generic load/store lane."); 9815 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); 9816 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); 9817 } 9818 // Don't set an explicit alignment on regular load/stores that we want 9819 // to transform to VLD/VST 1_UPD nodes. 9820 // This matches the behavior of regular load/stores, which only get an 9821 // explicit alignment if the MMO alignment is larger than the standard 9822 // alignment of the memory type. 9823 // Intrinsics, however, always get an explicit alignment, set to the 9824 // alignment of the MMO. 9825 Alignment = 1; 9826 } 9827 9828 // Create the new updating load/store node. 9829 // First, create an SDVTList for the new updating node's results. 9830 EVT Tys[6]; 9831 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); 9832 unsigned n; 9833 for (n = 0; n < NumResultVecs; ++n) 9834 Tys[n] = AlignedVecTy; 9835 Tys[n++] = MVT::i32; 9836 Tys[n] = MVT::Other; 9837 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); 9838 9839 // Then, gather the new node's operands. 9840 SmallVector<SDValue, 8> Ops; 9841 Ops.push_back(N->getOperand(0)); // incoming chain 9842 Ops.push_back(N->getOperand(AddrOpIdx)); 9843 Ops.push_back(Inc); 9844 9845 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { 9846 // Try to match the intrinsic's signature 9847 Ops.push_back(StN->getValue()); 9848 } else { 9849 // Loads (and of course intrinsics) match the intrinsics' signature, 9850 // so just add all but the alignment operand. 9851 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) 9852 Ops.push_back(N->getOperand(i)); 9853 } 9854 9855 // For all node types, the alignment operand is always the last one. 9856 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); 9857 9858 // If this is a non-standard-aligned STORE, the penultimate operand is the 9859 // stored value. Bitcast it to the aligned type. 
9860 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { 9861 SDValue &StVal = Ops[Ops.size()-2]; 9862 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); 9863 } 9864 9865 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, 9866 Ops, AlignedVecTy, 9867 MemN->getMemOperand()); 9868 9869 // Update the uses. 9870 SmallVector<SDValue, 5> NewResults; 9871 for (unsigned i = 0; i < NumResultVecs; ++i) 9872 NewResults.push_back(SDValue(UpdN.getNode(), i)); 9873 9874 // If this is an non-standard-aligned LOAD, the first result is the loaded 9875 // value. Bitcast it to the expected result type. 9876 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { 9877 SDValue &LdVal = NewResults[0]; 9878 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); 9879 } 9880 9881 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 9882 DCI.CombineTo(N, NewResults); 9883 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 9884 9885 break; 9886 } 9887 return SDValue(); 9888 } 9889 9890 static SDValue PerformVLDCombine(SDNode *N, 9891 TargetLowering::DAGCombinerInfo &DCI) { 9892 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 9893 return SDValue(); 9894 9895 return CombineBaseUpdate(N, DCI); 9896 } 9897 9898 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 9899 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 9900 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 9901 /// return true. 9902 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 9903 SelectionDAG &DAG = DCI.DAG; 9904 EVT VT = N->getValueType(0); 9905 // vldN-dup instructions only support 64-bit vectors for N > 1. 9906 if (!VT.is64BitVector()) 9907 return false; 9908 9909 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 9910 SDNode *VLD = N->getOperand(0).getNode(); 9911 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 9912 return false; 9913 unsigned NumVecs = 0; 9914 unsigned NewOpc = 0; 9915 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 9916 if (IntNo == Intrinsic::arm_neon_vld2lane) { 9917 NumVecs = 2; 9918 NewOpc = ARMISD::VLD2DUP; 9919 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 9920 NumVecs = 3; 9921 NewOpc = ARMISD::VLD3DUP; 9922 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 9923 NumVecs = 4; 9924 NewOpc = ARMISD::VLD4DUP; 9925 } else { 9926 return false; 9927 } 9928 9929 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 9930 // numbers match the load. 9931 unsigned VLDLaneNo = 9932 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 9933 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 9934 UI != UE; ++UI) { 9935 // Ignore uses of the chain result. 9936 if (UI.getUse().getResNo() == NumVecs) 9937 continue; 9938 SDNode *User = *UI; 9939 if (User->getOpcode() != ARMISD::VDUPLANE || 9940 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 9941 return false; 9942 } 9943 9944 // Create the vldN-dup node. 
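  // Sketch of the replacement (assembly shown for illustration): when every
  // lane use of a vld2lane is a VDUPLANE of that lane,
  //   vld2.16 {d16[2], d17[2]}, [r0]
  //   vdup.16 q9, d16[2]
  // can instead be expressed with the all-lanes form
  //   vld2.16 {d16[], d17[]}, [r0]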
9945 EVT Tys[5]; 9946 unsigned n; 9947 for (n = 0; n < NumVecs; ++n) 9948 Tys[n] = VT; 9949 Tys[n] = MVT::Other; 9950 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); 9951 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 9952 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 9953 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, 9954 Ops, VLDMemInt->getMemoryVT(), 9955 VLDMemInt->getMemOperand()); 9956 9957 // Update the uses. 9958 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 9959 UI != UE; ++UI) { 9960 unsigned ResNo = UI.getUse().getResNo(); 9961 // Ignore uses of the chain result. 9962 if (ResNo == NumVecs) 9963 continue; 9964 SDNode *User = *UI; 9965 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 9966 } 9967 9968 // Now the vldN-lane intrinsic is dead except for its chain result. 9969 // Update uses of the chain. 9970 std::vector<SDValue> VLDDupResults; 9971 for (unsigned n = 0; n < NumVecs; ++n) 9972 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 9973 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 9974 DCI.CombineTo(VLD, VLDDupResults); 9975 9976 return true; 9977 } 9978 9979 /// PerformVDUPLANECombine - Target-specific dag combine xforms for 9980 /// ARMISD::VDUPLANE. 9981 static SDValue PerformVDUPLANECombine(SDNode *N, 9982 TargetLowering::DAGCombinerInfo &DCI) { 9983 SDValue Op = N->getOperand(0); 9984 9985 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 9986 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 9987 if (CombineVLDDUP(N, DCI)) 9988 return SDValue(N, 0); 9989 9990 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 9991 // redundant. Ignore bit_converts for now; element sizes are checked below. 9992 while (Op.getOpcode() == ISD::BITCAST) 9993 Op = Op.getOperand(0); 9994 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 9995 return SDValue(); 9996 9997 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 9998 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 9999 // The canonical VMOV for a zero vector uses a 32-bit element size. 10000 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10001 unsigned EltBits; 10002 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 10003 EltSize = 8; 10004 EVT VT = N->getValueType(0); 10005 if (EltSize > VT.getVectorElementType().getSizeInBits()) 10006 return SDValue(); 10007 10008 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 10009 } 10010 10011 static SDValue PerformLOADCombine(SDNode *N, 10012 TargetLowering::DAGCombinerInfo &DCI) { 10013 EVT VT = N->getValueType(0); 10014 10015 // If this is a legal vector load, try to combine it into a VLD1_UPD. 10016 if (ISD::isNormalLoad(N) && VT.isVector() && 10017 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10018 return CombineBaseUpdate(N, DCI); 10019 10020 return SDValue(); 10021 } 10022 10023 /// PerformSTORECombine - Target-specific dag combine xforms for 10024 /// ISD::STORE. 10025 static SDValue PerformSTORECombine(SDNode *N, 10026 TargetLowering::DAGCombinerInfo &DCI) { 10027 StoreSDNode *St = cast<StoreSDNode>(N); 10028 if (St->isVolatile()) 10029 return SDValue(); 10030 10031 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 10032 // pack all of the elements in one place. Next, store to memory in fewer 10033 // chunks. 
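  // A worked example, for illustration: a truncating store of a v4i32 value
  // as v4i8 bitcasts it to v16i8, shuffles the low byte of each 32-bit lane
  // to the front, and then performs a single i32 store of the packed bytes
  // instead of four separate narrow stores.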
10034 SDValue StVal = St->getValue(); 10035 EVT VT = StVal.getValueType(); 10036 if (St->isTruncatingStore() && VT.isVector()) { 10037 SelectionDAG &DAG = DCI.DAG; 10038 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10039 EVT StVT = St->getMemoryVT(); 10040 unsigned NumElems = VT.getVectorNumElements(); 10041 assert(StVT != VT && "Cannot truncate to the same type"); 10042 unsigned FromEltSz = VT.getVectorElementType().getSizeInBits(); 10043 unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits(); 10044 10045 // From, To sizes and ElemCount must be pow of two 10046 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); 10047 10048 // We are going to use the original vector elt for storing. 10049 // Accumulated smaller vector elements must be a multiple of the store size. 10050 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); 10051 10052 unsigned SizeRatio = FromEltSz / ToEltSz; 10053 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 10054 10055 // Create a type on which we perform the shuffle. 10056 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 10057 NumElems*SizeRatio); 10058 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 10059 10060 SDLoc DL(St); 10061 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 10062 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 10063 for (unsigned i = 0; i < NumElems; ++i) 10064 ShuffleVec[i] = DAG.getDataLayout().isBigEndian() 10065 ? (i + 1) * SizeRatio - 1 10066 : i * SizeRatio; 10067 10068 // Can't shuffle using an illegal type. 10069 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 10070 10071 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, 10072 DAG.getUNDEF(WideVec.getValueType()), 10073 ShuffleVec.data()); 10074 // At this point all of the data is stored at the bottom of the 10075 // register. We now need to save it to mem. 10076 10077 // Find the largest store unit 10078 MVT StoreType = MVT::i8; 10079 for (MVT Tp : MVT::integer_valuetypes()) { 10080 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 10081 StoreType = Tp; 10082 } 10083 // Didn't find a legal store type. 10084 if (!TLI.isTypeLegal(StoreType)) 10085 return SDValue(); 10086 10087 // Bitcast the original vector into a vector of store-size units 10088 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 10089 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 10090 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 10091 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 10092 SmallVector<SDValue, 8> Chains; 10093 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, 10094 TLI.getPointerTy(DAG.getDataLayout())); 10095 SDValue BasePtr = St->getBasePtr(); 10096 10097 // Perform one or more big stores into memory. 
10098 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); 10099 for (unsigned I = 0; I < E; I++) { 10100 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 10101 StoreType, ShuffWide, 10102 DAG.getIntPtrConstant(I, DL)); 10103 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, 10104 St->getPointerInfo(), St->isVolatile(), 10105 St->isNonTemporal(), St->getAlignment()); 10106 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 10107 Increment); 10108 Chains.push_back(Ch); 10109 } 10110 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 10111 } 10112 10113 if (!ISD::isNormalStore(St)) 10114 return SDValue(); 10115 10116 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 10117 // ARM stores of arguments in the same cache line. 10118 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 10119 StVal.getNode()->hasOneUse()) { 10120 SelectionDAG &DAG = DCI.DAG; 10121 bool isBigEndian = DAG.getDataLayout().isBigEndian(); 10122 SDLoc DL(St); 10123 SDValue BasePtr = St->getBasePtr(); 10124 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 10125 StVal.getNode()->getOperand(isBigEndian ? 1 : 0 ), 10126 BasePtr, St->getPointerInfo(), St->isVolatile(), 10127 St->isNonTemporal(), St->getAlignment()); 10128 10129 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 10130 DAG.getConstant(4, DL, MVT::i32)); 10131 return DAG.getStore(NewST1.getValue(0), DL, 10132 StVal.getNode()->getOperand(isBigEndian ? 0 : 1), 10133 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 10134 St->isNonTemporal(), 10135 std::min(4U, St->getAlignment() / 2)); 10136 } 10137 10138 if (StVal.getValueType() == MVT::i64 && 10139 StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 10140 10141 // Bitcast an i64 store extracted from a vector to f64. 10142 // Otherwise, the i64 value will be legalized to a pair of i32 values. 10143 SelectionDAG &DAG = DCI.DAG; 10144 SDLoc dl(StVal); 10145 SDValue IntVec = StVal.getOperand(0); 10146 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 10147 IntVec.getValueType().getVectorNumElements()); 10148 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 10149 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 10150 Vec, StVal.getOperand(1)); 10151 dl = SDLoc(N); 10152 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 10153 // Make the DAGCombiner fold the bitcasts. 10154 DCI.AddToWorklist(Vec.getNode()); 10155 DCI.AddToWorklist(ExtElt.getNode()); 10156 DCI.AddToWorklist(V.getNode()); 10157 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 10158 St->getPointerInfo(), St->isVolatile(), 10159 St->isNonTemporal(), St->getAlignment(), 10160 St->getAAInfo()); 10161 } 10162 10163 // If this is a legal vector store, try to combine it into a VST1_UPD. 10164 if (ISD::isNormalStore(N) && VT.isVector() && 10165 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10166 return CombineBaseUpdate(N, DCI); 10167 10168 return SDValue(); 10169 } 10170 10171 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 10172 /// can replace combinations of VMUL and VCVT (floating-point to integer) 10173 /// when the VMUL has a constant operand that is a power of 2. 
10174 ///
10175 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
10176 ///  vmul.f32        d16, d17, d16
10177 ///  vcvt.s32.f32    d16, d16
10178 /// becomes:
10179 ///  vcvt.s32.f32    d16, d16, #3
10180 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
10181                                   const ARMSubtarget *Subtarget) {
10182   if (!Subtarget->hasNEON())
10183     return SDValue();
10184 
10185   SDValue Op = N->getOperand(0);
10186   if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
10187       Op.getOpcode() != ISD::FMUL)
10188     return SDValue();
10189 
10190   SDValue ConstVec = Op->getOperand(1);
10191   if (!isa<BuildVectorSDNode>(ConstVec))
10192     return SDValue();
10193 
10194   MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
10195   uint32_t FloatBits = FloatTy.getSizeInBits();
10196   MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
10197   uint32_t IntBits = IntTy.getSizeInBits();
10198   unsigned NumLanes = Op.getValueType().getVectorNumElements();
10199   if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
10200     // These instructions only exist converting from f32 to i32. We can handle
10201     // smaller integers by generating an extra truncate, but larger ones would
10202     // be lossy. We also can't handle more than 4 lanes, since these instructions
10203     // only support v2i32/v4i32 types.
10204     return SDValue();
10205   }
10206 
10207   BitVector UndefElements;
10208   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
10209   int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
10210   if (C == -1 || C == 0 || C > 32)
10211     return SDValue();
10212 
10213   SDLoc dl(N);
10214   bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
10215   unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
10216     Intrinsic::arm_neon_vcvtfp2fxu;
10217   SDValue FixConv = DAG.getNode(
10218       ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
10219       DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
10220       DAG.getConstant(C, dl, MVT::i32));
10221 
10222   if (IntBits < FloatBits)
10223     FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
10224 
10225   return FixConv;
10226 }
10227 
10228 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
10229 /// can replace combinations of VCVT (integer to floating-point) and VDIV
10230 /// when the VDIV has a constant operand that is a power of 2.
10231 ///
10232 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
10233 /// vcvt.f32.s32 d16, d16
10234 /// vdiv.f32 d16, d17, d16
10235 /// becomes:
10236 /// vcvt.f32.s32 d16, d16, #3
10237 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
10238 const ARMSubtarget *Subtarget) {
10239 if (!Subtarget->hasNEON())
10240 return SDValue();
10241
10242 SDValue Op = N->getOperand(0);
10243 unsigned OpOpcode = Op.getNode()->getOpcode();
10244 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
10245 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
10246 return SDValue();
10247
10248 SDValue ConstVec = N->getOperand(1);
10249 if (!isa<BuildVectorSDNode>(ConstVec))
10250 return SDValue();
10251
10252 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
10253 uint32_t FloatBits = FloatTy.getSizeInBits();
10254 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
10255 uint32_t IntBits = IntTy.getSizeInBits();
10256 unsigned NumLanes = Op.getValueType().getVectorNumElements();
10257 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
10258 // These instructions only exist converting from i32 to f32. We can handle
10259 // smaller integers by generating an extra extend, but larger ones would
10260 // be lossy. We also can't handle more than 4 lanes, since these instructions
10261 // only support v2i32/v4i32 types.
10262 return SDValue();
10263 }
10264
10265 BitVector UndefElements;
10266 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
10267 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
10268 if (C == -1 || C == 0 || C > 32)
10269 return SDValue();
10270
10271 SDLoc dl(N);
10272 bool isSigned = OpOpcode == ISD::SINT_TO_FP;
10273 SDValue ConvInput = Op.getOperand(0);
10274 if (IntBits < FloatBits)
10275 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
10276 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
10277 ConvInput);
10278
10279 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
10280 Intrinsic::arm_neon_vcvtfxu2fp;
10281 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
10282 Op.getValueType(),
10283 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
10284 ConvInput, DAG.getConstant(C, dl, MVT::i32));
10285 }
10286
10287 /// getVShiftImm - Check if this is a valid build_vector for the immediate
10288 /// operand of a vector shift operation, where all the elements of the
10289 /// build_vector must have the same constant integer value.
10290 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
10291 // Ignore bit_converts.
10292 while (Op.getOpcode() == ISD::BITCAST)
10293 Op = Op.getOperand(0);
10294 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
10295 APInt SplatBits, SplatUndef;
10296 unsigned SplatBitSize;
10297 bool HasAnyUndefs;
10298 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
10299 HasAnyUndefs, ElementBits) ||
10300 SplatBitSize > ElementBits)
10301 return false;
10302 Cnt = SplatBits.getSExtValue();
10303 return true;
10304 }
10305
10306 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
10307 /// operand of a vector shift left operation. That value must be in the range:
10308 /// 0 <= Value < ElementBits for a left shift; or
10309 /// 0 <= Value <= ElementBits for a long left shift.
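/// For example, with v8i8 a splat shift amount of 7 is valid for a regular
/// VSHL, while 8 is only accepted for the long form (VSHLL).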
10310 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 10311 assert(VT.isVector() && "vector shift count is not a vector type"); 10312 int64_t ElementBits = VT.getVectorElementType().getSizeInBits(); 10313 if (! getVShiftImm(Op, ElementBits, Cnt)) 10314 return false; 10315 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 10316 } 10317 10318 /// isVShiftRImm - Check if this is a valid build_vector for the immediate 10319 /// operand of a vector shift right operation. For a shift opcode, the value 10320 /// is positive, but for an intrinsic the value count must be negative. The 10321 /// absolute value must be in the range: 10322 /// 1 <= |Value| <= ElementBits for a right shift; or 10323 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 10324 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 10325 int64_t &Cnt) { 10326 assert(VT.isVector() && "vector shift count is not a vector type"); 10327 int64_t ElementBits = VT.getVectorElementType().getSizeInBits(); 10328 if (! getVShiftImm(Op, ElementBits, Cnt)) 10329 return false; 10330 if (!isIntrinsic) 10331 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 10332 if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) { 10333 Cnt = -Cnt; 10334 return true; 10335 } 10336 return false; 10337 } 10338 10339 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 10340 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 10341 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10342 switch (IntNo) { 10343 default: 10344 // Don't do anything for most intrinsics. 10345 break; 10346 10347 // Vector shifts: check for immediate versions and lower them. 10348 // Note: This is done during DAG combining instead of DAG legalizing because 10349 // the build_vectors for 64-bit vector element shift counts are generally 10350 // not legal, and it is hard to see their values after they get legalized to 10351 // loads from a constant pool. 10352 case Intrinsic::arm_neon_vshifts: 10353 case Intrinsic::arm_neon_vshiftu: 10354 case Intrinsic::arm_neon_vrshifts: 10355 case Intrinsic::arm_neon_vrshiftu: 10356 case Intrinsic::arm_neon_vrshiftn: 10357 case Intrinsic::arm_neon_vqshifts: 10358 case Intrinsic::arm_neon_vqshiftu: 10359 case Intrinsic::arm_neon_vqshiftsu: 10360 case Intrinsic::arm_neon_vqshiftns: 10361 case Intrinsic::arm_neon_vqshiftnu: 10362 case Intrinsic::arm_neon_vqshiftnsu: 10363 case Intrinsic::arm_neon_vqrshiftns: 10364 case Intrinsic::arm_neon_vqrshiftnu: 10365 case Intrinsic::arm_neon_vqrshiftnsu: { 10366 EVT VT = N->getOperand(1).getValueType(); 10367 int64_t Cnt; 10368 unsigned VShiftOpc = 0; 10369 10370 switch (IntNo) { 10371 case Intrinsic::arm_neon_vshifts: 10372 case Intrinsic::arm_neon_vshiftu: 10373 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 10374 VShiftOpc = ARMISD::VSHL; 10375 break; 10376 } 10377 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 10378 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
10379 ARMISD::VSHRs : ARMISD::VSHRu); 10380 break; 10381 } 10382 return SDValue(); 10383 10384 case Intrinsic::arm_neon_vrshifts: 10385 case Intrinsic::arm_neon_vrshiftu: 10386 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 10387 break; 10388 return SDValue(); 10389 10390 case Intrinsic::arm_neon_vqshifts: 10391 case Intrinsic::arm_neon_vqshiftu: 10392 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 10393 break; 10394 return SDValue(); 10395 10396 case Intrinsic::arm_neon_vqshiftsu: 10397 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 10398 break; 10399 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 10400 10401 case Intrinsic::arm_neon_vrshiftn: 10402 case Intrinsic::arm_neon_vqshiftns: 10403 case Intrinsic::arm_neon_vqshiftnu: 10404 case Intrinsic::arm_neon_vqshiftnsu: 10405 case Intrinsic::arm_neon_vqrshiftns: 10406 case Intrinsic::arm_neon_vqrshiftnu: 10407 case Intrinsic::arm_neon_vqrshiftnsu: 10408 // Narrowing shifts require an immediate right shift. 10409 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 10410 break; 10411 llvm_unreachable("invalid shift count for narrowing vector shift " 10412 "intrinsic"); 10413 10414 default: 10415 llvm_unreachable("unhandled vector shift"); 10416 } 10417 10418 switch (IntNo) { 10419 case Intrinsic::arm_neon_vshifts: 10420 case Intrinsic::arm_neon_vshiftu: 10421 // Opcode already set above. 10422 break; 10423 case Intrinsic::arm_neon_vrshifts: 10424 VShiftOpc = ARMISD::VRSHRs; break; 10425 case Intrinsic::arm_neon_vrshiftu: 10426 VShiftOpc = ARMISD::VRSHRu; break; 10427 case Intrinsic::arm_neon_vrshiftn: 10428 VShiftOpc = ARMISD::VRSHRN; break; 10429 case Intrinsic::arm_neon_vqshifts: 10430 VShiftOpc = ARMISD::VQSHLs; break; 10431 case Intrinsic::arm_neon_vqshiftu: 10432 VShiftOpc = ARMISD::VQSHLu; break; 10433 case Intrinsic::arm_neon_vqshiftsu: 10434 VShiftOpc = ARMISD::VQSHLsu; break; 10435 case Intrinsic::arm_neon_vqshiftns: 10436 VShiftOpc = ARMISD::VQSHRNs; break; 10437 case Intrinsic::arm_neon_vqshiftnu: 10438 VShiftOpc = ARMISD::VQSHRNu; break; 10439 case Intrinsic::arm_neon_vqshiftnsu: 10440 VShiftOpc = ARMISD::VQSHRNsu; break; 10441 case Intrinsic::arm_neon_vqrshiftns: 10442 VShiftOpc = ARMISD::VQRSHRNs; break; 10443 case Intrinsic::arm_neon_vqrshiftnu: 10444 VShiftOpc = ARMISD::VQRSHRNu; break; 10445 case Intrinsic::arm_neon_vqrshiftnsu: 10446 VShiftOpc = ARMISD::VQRSHRNsu; break; 10447 } 10448 10449 SDLoc dl(N); 10450 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 10451 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); 10452 } 10453 10454 case Intrinsic::arm_neon_vshiftins: { 10455 EVT VT = N->getOperand(1).getValueType(); 10456 int64_t Cnt; 10457 unsigned VShiftOpc = 0; 10458 10459 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 10460 VShiftOpc = ARMISD::VSLI; 10461 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 10462 VShiftOpc = ARMISD::VSRI; 10463 else { 10464 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 10465 } 10466 10467 SDLoc dl(N); 10468 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 10469 N->getOperand(1), N->getOperand(2), 10470 DAG.getConstant(Cnt, dl, MVT::i32)); 10471 } 10472 10473 case Intrinsic::arm_neon_vqrshifts: 10474 case Intrinsic::arm_neon_vqrshiftu: 10475 // No immediate versions of these to check for. 10476 break; 10477 } 10478 10479 return SDValue(); 10480 } 10481 10482 /// PerformShiftCombine - Checks for immediate versions of vector shifts and 10483 /// lowers them. 
As with the vector shift intrinsics, this is done during DAG 10484 /// combining instead of DAG legalizing because the build_vectors for 64-bit 10485 /// vector element shift counts are generally not legal, and it is hard to see 10486 /// their values after they get legalized to loads from a constant pool. 10487 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 10488 const ARMSubtarget *ST) { 10489 EVT VT = N->getValueType(0); 10490 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 10491 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 10492 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. 10493 SDValue N1 = N->getOperand(1); 10494 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 10495 SDValue N0 = N->getOperand(0); 10496 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 10497 DAG.MaskedValueIsZero(N0.getOperand(0), 10498 APInt::getHighBitsSet(32, 16))) 10499 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); 10500 } 10501 } 10502 10503 // Nothing to be done for scalar shifts. 10504 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10505 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 10506 return SDValue(); 10507 10508 assert(ST->hasNEON() && "unexpected vector shift"); 10509 int64_t Cnt; 10510 10511 switch (N->getOpcode()) { 10512 default: llvm_unreachable("unexpected shift opcode"); 10513 10514 case ISD::SHL: 10515 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { 10516 SDLoc dl(N); 10517 return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0), 10518 DAG.getConstant(Cnt, dl, MVT::i32)); 10519 } 10520 break; 10521 10522 case ISD::SRA: 10523 case ISD::SRL: 10524 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 10525 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 10526 ARMISD::VSHRs : ARMISD::VSHRu); 10527 SDLoc dl(N); 10528 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), 10529 DAG.getConstant(Cnt, dl, MVT::i32)); 10530 } 10531 } 10532 return SDValue(); 10533 } 10534 10535 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 10536 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 10537 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 10538 const ARMSubtarget *ST) { 10539 SDValue N0 = N->getOperand(0); 10540 10541 // Check for sign- and zero-extensions of vector extract operations of 8- 10542 // and 16-bit vector elements. NEON supports these directly. They are 10543 // handled during DAG combining because type legalization will promote them 10544 // to 32-bit types and it is messy to recognize the operations after that. 
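// For example, (sext (extract_vector_elt v8i8 V, lane)) can be selected as a
// single sign-extending VGETLANE (vmov.s8) rather than an extract followed by
// a separate extension.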
10545 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 10546 SDValue Vec = N0.getOperand(0); 10547 SDValue Lane = N0.getOperand(1); 10548 EVT VT = N->getValueType(0); 10549 EVT EltVT = N0.getValueType(); 10550 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10551 10552 if (VT == MVT::i32 && 10553 (EltVT == MVT::i8 || EltVT == MVT::i16) && 10554 TLI.isTypeLegal(Vec.getValueType()) && 10555 isa<ConstantSDNode>(Lane)) { 10556 10557 unsigned Opc = 0; 10558 switch (N->getOpcode()) { 10559 default: llvm_unreachable("unexpected opcode"); 10560 case ISD::SIGN_EXTEND: 10561 Opc = ARMISD::VGETLANEs; 10562 break; 10563 case ISD::ZERO_EXTEND: 10564 case ISD::ANY_EXTEND: 10565 Opc = ARMISD::VGETLANEu; 10566 break; 10567 } 10568 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); 10569 } 10570 } 10571 10572 return SDValue(); 10573 } 10574 10575 static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero, 10576 APInt &KnownOne) { 10577 if (Op.getOpcode() == ARMISD::BFI) { 10578 // Conservatively, we can recurse down the first operand 10579 // and just mask out all affected bits. 10580 computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne); 10581 10582 // The operand to BFI is already a mask suitable for removing the bits it 10583 // sets. 10584 ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); 10585 APInt Mask = CI->getAPIntValue(); 10586 KnownZero &= Mask; 10587 KnownOne &= Mask; 10588 return; 10589 } 10590 if (Op.getOpcode() == ARMISD::CMOV) { 10591 APInt KZ2(KnownZero.getBitWidth(), 0); 10592 APInt KO2(KnownOne.getBitWidth(), 0); 10593 computeKnownBits(DAG, Op.getOperand(1), KnownZero, KnownOne); 10594 computeKnownBits(DAG, Op.getOperand(2), KZ2, KO2); 10595 10596 KnownZero &= KZ2; 10597 KnownOne &= KO2; 10598 return; 10599 } 10600 return DAG.computeKnownBits(Op, KnownZero, KnownOne); 10601 } 10602 10603 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { 10604 // If we have a CMOV, OR and AND combination such as: 10605 // if (x & CN) 10606 // y |= CM; 10607 // 10608 // And: 10609 // * CN is a single bit; 10610 // * All bits covered by CM are known zero in y 10611 // 10612 // Then we can convert this into a sequence of BFI instructions. This will 10613 // always be a win if CM is a single bit, will always be no worse than the 10614 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is 10615 // three bits (due to the extra IT instruction). 10616 10617 SDValue Op0 = CMOV->getOperand(0); 10618 SDValue Op1 = CMOV->getOperand(1); 10619 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); 10620 auto CC = CCNode->getAPIntValue().getLimitedValue(); 10621 SDValue CmpZ = CMOV->getOperand(4); 10622 10623 // The compare must be against zero. 10624 if (!isNullConstant(CmpZ->getOperand(1))) 10625 return SDValue(); 10626 10627 assert(CmpZ->getOpcode() == ARMISD::CMPZ); 10628 SDValue And = CmpZ->getOperand(0); 10629 if (And->getOpcode() != ISD::AND) 10630 return SDValue(); 10631 ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1)); 10632 if (!AndC || !AndC->getAPIntValue().isPowerOf2()) 10633 return SDValue(); 10634 SDValue X = And->getOperand(0); 10635 10636 if (CC == ARMCC::EQ) { 10637 // We're performing an "equal to zero" compare. Swap the operands so we 10638 // canonicalize on a "not equal to zero" compare. 
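// Swapping the CMOV's operands lets the code below assume the OR node is the
// value selected when the tested bit is set.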
10639 std::swap(Op0, Op1); 10640 } else { 10641 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?"); 10642 } 10643 10644 if (Op1->getOpcode() != ISD::OR) 10645 return SDValue(); 10646 10647 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); 10648 if (!OrC) 10649 return SDValue(); 10650 SDValue Y = Op1->getOperand(0); 10651 10652 if (Op0 != Y) 10653 return SDValue(); 10654 10655 // Now, is it profitable to continue? 10656 APInt OrCI = OrC->getAPIntValue(); 10657 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; 10658 if (OrCI.countPopulation() > Heuristic) 10659 return SDValue(); 10660 10661 // Lastly, can we determine that the bits defined by OrCI 10662 // are zero in Y? 10663 APInt KnownZero, KnownOne; 10664 computeKnownBits(DAG, Y, KnownZero, KnownOne); 10665 if ((OrCI & KnownZero) != OrCI) 10666 return SDValue(); 10667 10668 // OK, we can do the combine. 10669 SDValue V = Y; 10670 SDLoc dl(X); 10671 EVT VT = X.getValueType(); 10672 unsigned BitInX = AndC->getAPIntValue().logBase2(); 10673 10674 if (BitInX != 0) { 10675 // We must shift X first. 10676 X = DAG.getNode(ISD::SRL, dl, VT, X, 10677 DAG.getConstant(BitInX, dl, VT)); 10678 } 10679 10680 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); 10681 BitInY < NumActiveBits; ++BitInY) { 10682 if (OrCI[BitInY] == 0) 10683 continue; 10684 APInt Mask(VT.getSizeInBits(), 0); 10685 Mask.setBit(BitInY); 10686 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, 10687 // Confusingly, the operand is an *inverted* mask. 10688 DAG.getConstant(~Mask, dl, VT)); 10689 } 10690 10691 return V; 10692 } 10693 10694 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. 10695 SDValue 10696 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { 10697 SDValue Cmp = N->getOperand(4); 10698 if (Cmp.getOpcode() != ARMISD::CMPZ) 10699 // Only looking at NE cases. 10700 return SDValue(); 10701 10702 EVT VT = N->getValueType(0); 10703 SDLoc dl(N); 10704 SDValue LHS = Cmp.getOperand(0); 10705 SDValue RHS = Cmp.getOperand(1); 10706 SDValue Chain = N->getOperand(0); 10707 SDValue BB = N->getOperand(1); 10708 SDValue ARMcc = N->getOperand(2); 10709 ARMCC::CondCodes CC = 10710 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 10711 10712 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) 10713 // -> (brcond Chain BB CC CPSR Cmp) 10714 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && 10715 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && 10716 LHS->getOperand(0)->hasOneUse()) { 10717 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); 10718 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); 10719 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 10720 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 10721 if ((LHS00C && LHS00C->getZExtValue() == 0) && 10722 (LHS01C && LHS01C->getZExtValue() == 1) && 10723 (LHS1C && LHS1C->getZExtValue() == 1) && 10724 (RHSC && RHSC->getZExtValue() == 0)) { 10725 return DAG.getNode( 10726 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), 10727 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); 10728 } 10729 } 10730 10731 return SDValue(); 10732 } 10733 10734 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 
10735 SDValue 10736 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 10737 SDValue Cmp = N->getOperand(4); 10738 if (Cmp.getOpcode() != ARMISD::CMPZ) 10739 // Only looking at EQ and NE cases. 10740 return SDValue(); 10741 10742 EVT VT = N->getValueType(0); 10743 SDLoc dl(N); 10744 SDValue LHS = Cmp.getOperand(0); 10745 SDValue RHS = Cmp.getOperand(1); 10746 SDValue FalseVal = N->getOperand(0); 10747 SDValue TrueVal = N->getOperand(1); 10748 SDValue ARMcc = N->getOperand(2); 10749 ARMCC::CondCodes CC = 10750 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 10751 10752 // BFI is only available on V6T2+. 10753 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { 10754 SDValue R = PerformCMOVToBFICombine(N, DAG); 10755 if (R) 10756 return R; 10757 } 10758 10759 // Simplify 10760 // mov r1, r0 10761 // cmp r1, x 10762 // mov r0, y 10763 // moveq r0, x 10764 // to 10765 // cmp r0, x 10766 // movne r0, y 10767 // 10768 // mov r1, r0 10769 // cmp r1, x 10770 // mov r0, x 10771 // movne r0, y 10772 // to 10773 // cmp r0, x 10774 // movne r0, y 10775 /// FIXME: Turn this into a target neutral optimization? 10776 SDValue Res; 10777 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 10778 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 10779 N->getOperand(3), Cmp); 10780 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 10781 SDValue ARMcc; 10782 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 10783 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 10784 N->getOperand(3), NewCmp); 10785 } 10786 10787 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) 10788 // -> (cmov F T CC CPSR Cmp) 10789 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { 10790 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); 10791 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 10792 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 10793 if ((LHS0C && LHS0C->getZExtValue() == 0) && 10794 (LHS1C && LHS1C->getZExtValue() == 1) && 10795 (RHSC && RHSC->getZExtValue() == 0)) { 10796 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 10797 LHS->getOperand(2), LHS->getOperand(3), 10798 LHS->getOperand(4)); 10799 } 10800 } 10801 10802 if (Res.getNode()) { 10803 APInt KnownZero, KnownOne; 10804 DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne); 10805 // Capture demanded bits information that would be otherwise lost. 
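// For example, if every bit other than bit 0 is known to be zero, the result
// effectively fits in an i1, so record that with an AssertZext.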
10806 if (KnownZero == 0xfffffffe) 10807 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 10808 DAG.getValueType(MVT::i1)); 10809 else if (KnownZero == 0xffffff00) 10810 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 10811 DAG.getValueType(MVT::i8)); 10812 else if (KnownZero == 0xffff0000) 10813 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 10814 DAG.getValueType(MVT::i16)); 10815 } 10816 10817 return Res; 10818 } 10819 10820 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 10821 DAGCombinerInfo &DCI) const { 10822 switch (N->getOpcode()) { 10823 default: break; 10824 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); 10825 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 10826 case ISD::SUB: return PerformSUBCombine(N, DCI); 10827 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 10828 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 10829 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 10830 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 10831 case ARMISD::BFI: return PerformBFICombine(N, DCI); 10832 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); 10833 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 10834 case ISD::STORE: return PerformSTORECombine(N, DCI); 10835 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); 10836 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 10837 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 10838 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 10839 case ISD::FP_TO_SINT: 10840 case ISD::FP_TO_UINT: 10841 return PerformVCVTCombine(N, DCI.DAG, Subtarget); 10842 case ISD::FDIV: 10843 return PerformVDIVCombine(N, DCI.DAG, Subtarget); 10844 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 10845 case ISD::SHL: 10846 case ISD::SRA: 10847 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 10848 case ISD::SIGN_EXTEND: 10849 case ISD::ZERO_EXTEND: 10850 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 10851 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 10852 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); 10853 case ISD::LOAD: return PerformLOADCombine(N, DCI); 10854 case ARMISD::VLD2DUP: 10855 case ARMISD::VLD3DUP: 10856 case ARMISD::VLD4DUP: 10857 return PerformVLDCombine(N, DCI); 10858 case ARMISD::BUILD_VECTOR: 10859 return PerformARMBUILD_VECTORCombine(N, DCI); 10860 case ISD::INTRINSIC_VOID: 10861 case ISD::INTRINSIC_W_CHAIN: 10862 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10863 case Intrinsic::arm_neon_vld1: 10864 case Intrinsic::arm_neon_vld2: 10865 case Intrinsic::arm_neon_vld3: 10866 case Intrinsic::arm_neon_vld4: 10867 case Intrinsic::arm_neon_vld2lane: 10868 case Intrinsic::arm_neon_vld3lane: 10869 case Intrinsic::arm_neon_vld4lane: 10870 case Intrinsic::arm_neon_vst1: 10871 case Intrinsic::arm_neon_vst2: 10872 case Intrinsic::arm_neon_vst3: 10873 case Intrinsic::arm_neon_vst4: 10874 case Intrinsic::arm_neon_vst2lane: 10875 case Intrinsic::arm_neon_vst3lane: 10876 case Intrinsic::arm_neon_vst4lane: 10877 return PerformVLDCombine(N, DCI); 10878 default: break; 10879 } 10880 break; 10881 } 10882 return SDValue(); 10883 } 10884 10885 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 10886 EVT VT) const { 10887 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 10888 } 10889 10890 bool 
ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
10891 unsigned,
10892 unsigned,
10893 bool *Fast) const {
10894 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
10895 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
10896
10897 switch (VT.getSimpleVT().SimpleTy) {
10898 default:
10899 return false;
10900 case MVT::i8:
10901 case MVT::i16:
10902 case MVT::i32: {
10903 // Unaligned access can use (for example) LDRB, LDRH, LDR
10904 if (AllowsUnaligned) {
10905 if (Fast)
10906 *Fast = Subtarget->hasV7Ops();
10907 return true;
10908 }
10909 return false;
10910 }
10911 case MVT::f64:
10912 case MVT::v2f64: {
10913 // For any little-endian targets with NEON, we can support unaligned ld/st
10914 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
10915 // A big-endian target may also explicitly support unaligned accesses.
10916 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
10917 if (Fast)
10918 *Fast = true;
10919 return true;
10920 }
10921 return false;
10922 }
10923 }
10924 }
10925
10926 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
10927 unsigned AlignCheck) {
10928 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
10929 (DstAlign == 0 || DstAlign % AlignCheck == 0));
10930 }
10931
10932 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
10933 unsigned DstAlign, unsigned SrcAlign,
10934 bool IsMemset, bool ZeroMemset,
10935 bool MemcpyStrSrc,
10936 MachineFunction &MF) const {
10937 const Function *F = MF.getFunction();
10938
10939 // See if we can use NEON instructions for this...
10940 if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
10941 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10942 bool Fast;
10943 if (Size >= 16 &&
10944 (memOpAlign(SrcAlign, DstAlign, 16) ||
10945 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
10946 return MVT::v2f64;
10947 } else if (Size >= 8 &&
10948 (memOpAlign(SrcAlign, DstAlign, 8) ||
10949 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
10950 Fast))) {
10951 return MVT::f64;
10952 }
10953 }
10954
10955 // Lowering to i32/i16 if the size permits.
10956 if (Size >= 4)
10957 return MVT::i32;
10958 else if (Size >= 2)
10959 return MVT::i16;
10960
10961 // Let the target-independent logic figure it out.
10962 return MVT::Other;
10963 }
10964
10965 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
10966 if (Val.getOpcode() != ISD::LOAD)
10967 return false;
10968
10969 EVT VT1 = Val.getValueType();
10970 if (!VT1.isSimple() || !VT1.isInteger() ||
10971 !VT2.isSimple() || !VT2.isInteger())
10972 return false;
10973
10974 switch (VT1.getSimpleVT().SimpleTy) {
10975 default: break;
10976 case MVT::i1:
10977 case MVT::i8:
10978 case MVT::i16:
10979 // 8-bit and 16-bit loads implicitly zero-extend to 32 bits.
10980 return true;
10981 }
10982
10983 return false;
10984 }
10985
10986 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
10987 EVT VT = ExtVal.getValueType();
10988
10989 if (!isTypeLegal(VT))
10990 return false;
10991
10992 // Don't create a loadext if we can fold the extension into a wide/long
10993 // instruction.
10994 // If there's more than one user instruction, the loadext is desirable no
10995 // matter what. There can be two uses by the same instruction.
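// (For example, both operands of a single add can be the same extended load;
// that still counts as one user instruction.)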
10996 if (ExtVal->use_empty() || 10997 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) 10998 return true; 10999 11000 SDNode *U = *ExtVal->use_begin(); 11001 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || 11002 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL)) 11003 return false; 11004 11005 return true; 11006 } 11007 11008 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 11009 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11010 return false; 11011 11012 if (!isTypeLegal(EVT::getEVT(Ty1))) 11013 return false; 11014 11015 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); 11016 11017 // Assuming the caller doesn't have a zeroext or signext return parameter, 11018 // truncation all the way down to i1 is valid. 11019 return true; 11020 } 11021 11022 11023 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 11024 if (V < 0) 11025 return false; 11026 11027 unsigned Scale = 1; 11028 switch (VT.getSimpleVT().SimpleTy) { 11029 default: return false; 11030 case MVT::i1: 11031 case MVT::i8: 11032 // Scale == 1; 11033 break; 11034 case MVT::i16: 11035 // Scale == 2; 11036 Scale = 2; 11037 break; 11038 case MVT::i32: 11039 // Scale == 4; 11040 Scale = 4; 11041 break; 11042 } 11043 11044 if ((V & (Scale - 1)) != 0) 11045 return false; 11046 V /= Scale; 11047 return V == (V & ((1LL << 5) - 1)); 11048 } 11049 11050 static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 11051 const ARMSubtarget *Subtarget) { 11052 bool isNeg = false; 11053 if (V < 0) { 11054 isNeg = true; 11055 V = - V; 11056 } 11057 11058 switch (VT.getSimpleVT().SimpleTy) { 11059 default: return false; 11060 case MVT::i1: 11061 case MVT::i8: 11062 case MVT::i16: 11063 case MVT::i32: 11064 // + imm12 or - imm8 11065 if (isNeg) 11066 return V == (V & ((1LL << 8) - 1)); 11067 return V == (V & ((1LL << 12) - 1)); 11068 case MVT::f32: 11069 case MVT::f64: 11070 // Same as ARM mode. FIXME: NEON? 11071 if (!Subtarget->hasVFP2()) 11072 return false; 11073 if ((V & 3) != 0) 11074 return false; 11075 V >>= 2; 11076 return V == (V & ((1LL << 8) - 1)); 11077 } 11078 } 11079 11080 /// isLegalAddressImmediate - Return true if the integer value can be used 11081 /// as the offset of the target addressing mode for load / store of the 11082 /// given type. 11083 static bool isLegalAddressImmediate(int64_t V, EVT VT, 11084 const ARMSubtarget *Subtarget) { 11085 if (V == 0) 11086 return true; 11087 11088 if (!VT.isSimple()) 11089 return false; 11090 11091 if (Subtarget->isThumb1Only()) 11092 return isLegalT1AddressImmediate(V, VT); 11093 else if (Subtarget->isThumb2()) 11094 return isLegalT2AddressImmediate(V, VT, Subtarget); 11095 11096 // ARM mode. 11097 if (V < 0) 11098 V = - V; 11099 switch (VT.getSimpleVT().SimpleTy) { 11100 default: return false; 11101 case MVT::i1: 11102 case MVT::i8: 11103 case MVT::i32: 11104 // +- imm12 11105 return V == (V & ((1LL << 12) - 1)); 11106 case MVT::i16: 11107 // +- imm8 11108 return V == (V & ((1LL << 8) - 1)); 11109 case MVT::f32: 11110 case MVT::f64: 11111 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
11112 return false; 11113 if ((V & 3) != 0) 11114 return false; 11115 V >>= 2; 11116 return V == (V & ((1LL << 8) - 1)); 11117 } 11118 } 11119 11120 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 11121 EVT VT) const { 11122 int Scale = AM.Scale; 11123 if (Scale < 0) 11124 return false; 11125 11126 switch (VT.getSimpleVT().SimpleTy) { 11127 default: return false; 11128 case MVT::i1: 11129 case MVT::i8: 11130 case MVT::i16: 11131 case MVT::i32: 11132 if (Scale == 1) 11133 return true; 11134 // r + r << imm 11135 Scale = Scale & ~1; 11136 return Scale == 2 || Scale == 4 || Scale == 8; 11137 case MVT::i64: 11138 // r + r 11139 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 11140 return true; 11141 return false; 11142 case MVT::isVoid: 11143 // Note, we allow "void" uses (basically, uses that aren't loads or 11144 // stores), because arm allows folding a scale into many arithmetic 11145 // operations. This should be made more precise and revisited later. 11146 11147 // Allow r << imm, but the imm has to be a multiple of two. 11148 if (Scale & 1) return false; 11149 return isPowerOf2_32(Scale); 11150 } 11151 } 11152 11153 /// isLegalAddressingMode - Return true if the addressing mode represented 11154 /// by AM is legal for this target, for a load/store of the specified type. 11155 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, 11156 const AddrMode &AM, Type *Ty, 11157 unsigned AS) const { 11158 EVT VT = getValueType(DL, Ty, true); 11159 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 11160 return false; 11161 11162 // Can never fold addr of global into load/store. 11163 if (AM.BaseGV) 11164 return false; 11165 11166 switch (AM.Scale) { 11167 case 0: // no scale reg, must be "r+i" or "r", or "i". 11168 break; 11169 case 1: 11170 if (Subtarget->isThumb1Only()) 11171 return false; 11172 // FALL THROUGH. 11173 default: 11174 // ARM doesn't support any R+R*scale+imm addr modes. 11175 if (AM.BaseOffs) 11176 return false; 11177 11178 if (!VT.isSimple()) 11179 return false; 11180 11181 if (Subtarget->isThumb2()) 11182 return isLegalT2ScaledAddressingMode(AM, VT); 11183 11184 int Scale = AM.Scale; 11185 switch (VT.getSimpleVT().SimpleTy) { 11186 default: return false; 11187 case MVT::i1: 11188 case MVT::i8: 11189 case MVT::i32: 11190 if (Scale < 0) Scale = -Scale; 11191 if (Scale == 1) 11192 return true; 11193 // r + r << imm 11194 return isPowerOf2_32(Scale & ~1); 11195 case MVT::i16: 11196 case MVT::i64: 11197 // r + r 11198 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 11199 return true; 11200 return false; 11201 11202 case MVT::isVoid: 11203 // Note, we allow "void" uses (basically, uses that aren't loads or 11204 // stores), because arm allows folding a scale into many arithmetic 11205 // operations. This should be made more precise and revisited later. 11206 11207 // Allow r << imm, but the imm has to be a multiple of two. 11208 if (Scale & 1) return false; 11209 return isPowerOf2_32(Scale); 11210 } 11211 } 11212 return true; 11213 } 11214 11215 /// isLegalICmpImmediate - Return true if the specified immediate is legal 11216 /// icmp immediate, that is the target has icmp instructions which can compare 11217 /// a register against the immediate without having to materialize the 11218 /// immediate into a register. 11219 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11220 // Thumb2 and ARM modes can use cmn for negative immediates. 
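// For example, cmp r0, #-1 can be encoded as cmn r0, #1, so it is enough to
// check that the absolute value is a legal data-processing immediate.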
11221 if (!Subtarget->isThumb()) 11222 return ARM_AM::getSOImmVal(std::abs(Imm)) != -1; 11223 if (Subtarget->isThumb2()) 11224 return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1; 11225 // Thumb1 doesn't have cmn, and only 8-bit immediates. 11226 return Imm >= 0 && Imm <= 255; 11227 } 11228 11229 /// isLegalAddImmediate - Return true if the specified immediate is a legal add 11230 /// *or sub* immediate, that is the target has add or sub instructions which can 11231 /// add a register with the immediate without having to materialize the 11232 /// immediate into a register. 11233 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11234 // Same encoding for add/sub, just flip the sign. 11235 int64_t AbsImm = std::abs(Imm); 11236 if (!Subtarget->isThumb()) 11237 return ARM_AM::getSOImmVal(AbsImm) != -1; 11238 if (Subtarget->isThumb2()) 11239 return ARM_AM::getT2SOImmVal(AbsImm) != -1; 11240 // Thumb1 only has 8-bit unsigned immediate. 11241 return AbsImm >= 0 && AbsImm <= 255; 11242 } 11243 11244 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 11245 bool isSEXTLoad, SDValue &Base, 11246 SDValue &Offset, bool &isInc, 11247 SelectionDAG &DAG) { 11248 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 11249 return false; 11250 11251 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 11252 // AddressingMode 3 11253 Base = Ptr->getOperand(0); 11254 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 11255 int RHSC = (int)RHS->getZExtValue(); 11256 if (RHSC < 0 && RHSC > -256) { 11257 assert(Ptr->getOpcode() == ISD::ADD); 11258 isInc = false; 11259 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11260 return true; 11261 } 11262 } 11263 isInc = (Ptr->getOpcode() == ISD::ADD); 11264 Offset = Ptr->getOperand(1); 11265 return true; 11266 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 11267 // AddressingMode 2 11268 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 11269 int RHSC = (int)RHS->getZExtValue(); 11270 if (RHSC < 0 && RHSC > -0x1000) { 11271 assert(Ptr->getOpcode() == ISD::ADD); 11272 isInc = false; 11273 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11274 Base = Ptr->getOperand(0); 11275 return true; 11276 } 11277 } 11278 11279 if (Ptr->getOpcode() == ISD::ADD) { 11280 isInc = true; 11281 ARM_AM::ShiftOpc ShOpcVal= 11282 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 11283 if (ShOpcVal != ARM_AM::no_shift) { 11284 Base = Ptr->getOperand(1); 11285 Offset = Ptr->getOperand(0); 11286 } else { 11287 Base = Ptr->getOperand(0); 11288 Offset = Ptr->getOperand(1); 11289 } 11290 return true; 11291 } 11292 11293 isInc = (Ptr->getOpcode() == ISD::ADD); 11294 Base = Ptr->getOperand(0); 11295 Offset = Ptr->getOperand(1); 11296 return true; 11297 } 11298 11299 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 11300 return false; 11301 } 11302 11303 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 11304 bool isSEXTLoad, SDValue &Base, 11305 SDValue &Offset, bool &isInc, 11306 SelectionDAG &DAG) { 11307 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 11308 return false; 11309 11310 Base = Ptr->getOperand(0); 11311 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 11312 int RHSC = (int)RHS->getZExtValue(); 11313 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 
11314 assert(Ptr->getOpcode() == ISD::ADD); 11315 isInc = false; 11316 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11317 return true; 11318 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 11319 isInc = Ptr->getOpcode() == ISD::ADD; 11320 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11321 return true; 11322 } 11323 } 11324 11325 return false; 11326 } 11327 11328 /// getPreIndexedAddressParts - returns true by value, base pointer and 11329 /// offset pointer and addressing mode by reference if the node's address 11330 /// can be legally represented as pre-indexed load / store address. 11331 bool 11332 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 11333 SDValue &Offset, 11334 ISD::MemIndexedMode &AM, 11335 SelectionDAG &DAG) const { 11336 if (Subtarget->isThumb1Only()) 11337 return false; 11338 11339 EVT VT; 11340 SDValue Ptr; 11341 bool isSEXTLoad = false; 11342 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 11343 Ptr = LD->getBasePtr(); 11344 VT = LD->getMemoryVT(); 11345 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 11346 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 11347 Ptr = ST->getBasePtr(); 11348 VT = ST->getMemoryVT(); 11349 } else 11350 return false; 11351 11352 bool isInc; 11353 bool isLegal = false; 11354 if (Subtarget->isThumb2()) 11355 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 11356 Offset, isInc, DAG); 11357 else 11358 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 11359 Offset, isInc, DAG); 11360 if (!isLegal) 11361 return false; 11362 11363 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 11364 return true; 11365 } 11366 11367 /// getPostIndexedAddressParts - returns true by value, base pointer and 11368 /// offset pointer and addressing mode by reference if this node can be 11369 /// combined with a load / store to form a post-indexed load / store. 11370 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 11371 SDValue &Base, 11372 SDValue &Offset, 11373 ISD::MemIndexedMode &AM, 11374 SelectionDAG &DAG) const { 11375 if (Subtarget->isThumb1Only()) 11376 return false; 11377 11378 EVT VT; 11379 SDValue Ptr; 11380 bool isSEXTLoad = false; 11381 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 11382 VT = LD->getMemoryVT(); 11383 Ptr = LD->getBasePtr(); 11384 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 11385 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 11386 VT = ST->getMemoryVT(); 11387 Ptr = ST->getBasePtr(); 11388 } else 11389 return false; 11390 11391 bool isInc; 11392 bool isLegal = false; 11393 if (Subtarget->isThumb2()) 11394 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 11395 isInc, DAG); 11396 else 11397 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 11398 isInc, DAG); 11399 if (!isLegal) 11400 return false; 11401 11402 if (Ptr != Base) { 11403 // Swap base ptr and offset to catch more post-index load / store when 11404 // it's legal. In Thumb2 mode, offset must be an immediate. 11405 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 11406 !Subtarget->isThumb2()) 11407 std::swap(Base, Offset); 11408 11409 // Post-indexed load / store update the base pointer. 11410 if (Ptr != Base) 11411 return false; 11412 } 11413 11414 AM = isInc ? 
ISD::POST_INC : ISD::POST_DEC; 11415 return true; 11416 } 11417 11418 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 11419 APInt &KnownZero, 11420 APInt &KnownOne, 11421 const SelectionDAG &DAG, 11422 unsigned Depth) const { 11423 unsigned BitWidth = KnownOne.getBitWidth(); 11424 KnownZero = KnownOne = APInt(BitWidth, 0); 11425 switch (Op.getOpcode()) { 11426 default: break; 11427 case ARMISD::ADDC: 11428 case ARMISD::ADDE: 11429 case ARMISD::SUBC: 11430 case ARMISD::SUBE: 11431 // These nodes' second result is a boolean 11432 if (Op.getResNo() == 0) 11433 break; 11434 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 11435 break; 11436 case ARMISD::CMOV: { 11437 // Bits are known zero/one if known on the LHS and RHS. 11438 DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 11439 if (KnownZero == 0 && KnownOne == 0) return; 11440 11441 APInt KnownZeroRHS, KnownOneRHS; 11442 DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1); 11443 KnownZero &= KnownZeroRHS; 11444 KnownOne &= KnownOneRHS; 11445 return; 11446 } 11447 case ISD::INTRINSIC_W_CHAIN: { 11448 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); 11449 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); 11450 switch (IntID) { 11451 default: return; 11452 case Intrinsic::arm_ldaex: 11453 case Intrinsic::arm_ldrex: { 11454 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); 11455 unsigned MemBits = VT.getScalarType().getSizeInBits(); 11456 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 11457 return; 11458 } 11459 } 11460 } 11461 } 11462 } 11463 11464 //===----------------------------------------------------------------------===// 11465 // ARM Inline Assembly Support 11466 //===----------------------------------------------------------------------===// 11467 11468 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 11469 // Looking for "rev" which is V6+. 11470 if (!Subtarget->hasV6Ops()) 11471 return false; 11472 11473 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 11474 std::string AsmStr = IA->getAsmString(); 11475 SmallVector<StringRef, 4> AsmPieces; 11476 SplitString(AsmStr, AsmPieces, ";\n"); 11477 11478 switch (AsmPieces.size()) { 11479 default: return false; 11480 case 1: 11481 AsmStr = AsmPieces[0]; 11482 AsmPieces.clear(); 11483 SplitString(AsmStr, AsmPieces, " \t,"); 11484 11485 // rev $0, $1 11486 if (AsmPieces.size() == 3 && 11487 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 11488 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 11489 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 11490 if (Ty && Ty->getBitWidth() == 32) 11491 return IntrinsicLowering::LowerToByteSwap(CI); 11492 } 11493 break; 11494 } 11495 11496 return false; 11497 } 11498 11499 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { 11500 // At this point, we have to lower this constraint to something else, so we 11501 // lower it to an "r" or "w". However, by doing this we will force the result 11502 // to be in register, while the X constraint is much more permissive. 11503 // 11504 // Although we are correct (we are free to emit anything, without 11505 // constraints), we might break use cases that would expect us to be more 11506 // efficient and emit something else. 
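// In practice this steers floating-point and suitable vector operands to "w"
// when VFP/NEON registers are available, and everything else to "r".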
11507 if (!Subtarget->hasVFP2()) 11508 return "r"; 11509 if (ConstraintVT.isFloatingPoint()) 11510 return "w"; 11511 if (ConstraintVT.isVector() && Subtarget->hasNEON() && 11512 (ConstraintVT.getSizeInBits() == 64 || 11513 ConstraintVT.getSizeInBits() == 128)) 11514 return "w"; 11515 11516 return "r"; 11517 } 11518 11519 /// getConstraintType - Given a constraint letter, return the type of 11520 /// constraint it is for this target. 11521 ARMTargetLowering::ConstraintType 11522 ARMTargetLowering::getConstraintType(StringRef Constraint) const { 11523 if (Constraint.size() == 1) { 11524 switch (Constraint[0]) { 11525 default: break; 11526 case 'l': return C_RegisterClass; 11527 case 'w': return C_RegisterClass; 11528 case 'h': return C_RegisterClass; 11529 case 'x': return C_RegisterClass; 11530 case 't': return C_RegisterClass; 11531 case 'j': return C_Other; // Constant for movw. 11532 // An address with a single base register. Due to the way we 11533 // currently handle addresses it is the same as an 'r' memory constraint. 11534 case 'Q': return C_Memory; 11535 } 11536 } else if (Constraint.size() == 2) { 11537 switch (Constraint[0]) { 11538 default: break; 11539 // All 'U+' constraints are addresses. 11540 case 'U': return C_Memory; 11541 } 11542 } 11543 return TargetLowering::getConstraintType(Constraint); 11544 } 11545 11546 /// Examine constraint type and operand type and determine a weight value. 11547 /// This object must already have been set up with the operand type 11548 /// and the current alternative constraint selected. 11549 TargetLowering::ConstraintWeight 11550 ARMTargetLowering::getSingleConstraintMatchWeight( 11551 AsmOperandInfo &info, const char *constraint) const { 11552 ConstraintWeight weight = CW_Invalid; 11553 Value *CallOperandVal = info.CallOperandVal; 11554 // If we don't have a value, we can't do a match, 11555 // but allow it at the lowest weight. 11556 if (!CallOperandVal) 11557 return CW_Default; 11558 Type *type = CallOperandVal->getType(); 11559 // Look at the constraint type. 11560 switch (*constraint) { 11561 default: 11562 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 11563 break; 11564 case 'l': 11565 if (type->isIntegerTy()) { 11566 if (Subtarget->isThumb()) 11567 weight = CW_SpecificReg; 11568 else 11569 weight = CW_Register; 11570 } 11571 break; 11572 case 'w': 11573 if (type->isFloatingPointTy()) 11574 weight = CW_Register; 11575 break; 11576 } 11577 return weight; 11578 } 11579 11580 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 11581 RCPair ARMTargetLowering::getRegForInlineAsmConstraint( 11582 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { 11583 if (Constraint.size() == 1) { 11584 // GCC ARM Constraint Letters 11585 switch (Constraint[0]) { 11586 case 'l': // Low regs or general regs. 11587 if (Subtarget->isThumb()) 11588 return RCPair(0U, &ARM::tGPRRegClass); 11589 return RCPair(0U, &ARM::GPRRegClass); 11590 case 'h': // High regs or no regs. 
11591 if (Subtarget->isThumb()) 11592 return RCPair(0U, &ARM::hGPRRegClass); 11593 break; 11594 case 'r': 11595 if (Subtarget->isThumb1Only()) 11596 return RCPair(0U, &ARM::tGPRRegClass); 11597 return RCPair(0U, &ARM::GPRRegClass); 11598 case 'w': 11599 if (VT == MVT::Other) 11600 break; 11601 if (VT == MVT::f32) 11602 return RCPair(0U, &ARM::SPRRegClass); 11603 if (VT.getSizeInBits() == 64) 11604 return RCPair(0U, &ARM::DPRRegClass); 11605 if (VT.getSizeInBits() == 128) 11606 return RCPair(0U, &ARM::QPRRegClass); 11607 break; 11608 case 'x': 11609 if (VT == MVT::Other) 11610 break; 11611 if (VT == MVT::f32) 11612 return RCPair(0U, &ARM::SPR_8RegClass); 11613 if (VT.getSizeInBits() == 64) 11614 return RCPair(0U, &ARM::DPR_8RegClass); 11615 if (VT.getSizeInBits() == 128) 11616 return RCPair(0U, &ARM::QPR_8RegClass); 11617 break; 11618 case 't': 11619 if (VT == MVT::f32) 11620 return RCPair(0U, &ARM::SPRRegClass); 11621 break; 11622 } 11623 } 11624 if (StringRef("{cc}").equals_lower(Constraint)) 11625 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 11626 11627 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11628 } 11629 11630 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11631 /// vector. If it is invalid, don't add anything to Ops. 11632 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11633 std::string &Constraint, 11634 std::vector<SDValue>&Ops, 11635 SelectionDAG &DAG) const { 11636 SDValue Result; 11637 11638 // Currently only support length 1 constraints. 11639 if (Constraint.length() != 1) return; 11640 11641 char ConstraintLetter = Constraint[0]; 11642 switch (ConstraintLetter) { 11643 default: break; 11644 case 'j': 11645 case 'I': case 'J': case 'K': case 'L': 11646 case 'M': case 'N': case 'O': 11647 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 11648 if (!C) 11649 return; 11650 11651 int64_t CVal64 = C->getSExtValue(); 11652 int CVal = (int) CVal64; 11653 // None of these constraints allow values larger than 32 bits. Check 11654 // that the value fits in an int. 11655 if (CVal != CVal64) 11656 return; 11657 11658 switch (ConstraintLetter) { 11659 case 'j': 11660 // Constant suitable for movw, must be between 0 and 11661 // 65535. 11662 if (Subtarget->hasV6T2Ops()) 11663 if (CVal >= 0 && CVal <= 65535) 11664 break; 11665 return; 11666 case 'I': 11667 if (Subtarget->isThumb1Only()) { 11668 // This must be a constant between 0 and 255, for ADD 11669 // immediates. 11670 if (CVal >= 0 && CVal <= 255) 11671 break; 11672 } else if (Subtarget->isThumb2()) { 11673 // A constant that can be used as an immediate value in a 11674 // data-processing instruction. 11675 if (ARM_AM::getT2SOImmVal(CVal) != -1) 11676 break; 11677 } else { 11678 // A constant that can be used as an immediate value in a 11679 // data-processing instruction. 11680 if (ARM_AM::getSOImmVal(CVal) != -1) 11681 break; 11682 } 11683 return; 11684 11685 case 'J': 11686 if (Subtarget->isThumb1Only()) { 11687 // This must be a constant between -255 and -1, for negated ADD 11688 // immediates. This can be used in GCC with an "n" modifier that 11689 // prints the negated value, for use with SUB instructions. It is 11690 // not useful otherwise but is implemented for compatibility. 11691 if (CVal >= -255 && CVal <= -1) 11692 break; 11693 } else { 11694 // This must be a constant between -4095 and 4095. It is not clear 11695 // what this constraint is intended for. Implemented for 11696 // compatibility with GCC. 
11697 if (CVal >= -4095 && CVal <= 4095) 11698 break; 11699 } 11700 return; 11701 11702 case 'K': 11703 if (Subtarget->isThumb1Only()) { 11704 // A 32-bit value where only one byte has a nonzero value. Exclude 11705 // zero to match GCC. This constraint is used by GCC internally for 11706 // constants that can be loaded with a move/shift combination. 11707 // It is not useful otherwise but is implemented for compatibility. 11708 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 11709 break; 11710 } else if (Subtarget->isThumb2()) { 11711 // A constant whose bitwise inverse can be used as an immediate 11712 // value in a data-processing instruction. This can be used in GCC 11713 // with a "B" modifier that prints the inverted value, for use with 11714 // BIC and MVN instructions. It is not useful otherwise but is 11715 // implemented for compatibility. 11716 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 11717 break; 11718 } else { 11719 // A constant whose bitwise inverse can be used as an immediate 11720 // value in a data-processing instruction. This can be used in GCC 11721 // with a "B" modifier that prints the inverted value, for use with 11722 // BIC and MVN instructions. It is not useful otherwise but is 11723 // implemented for compatibility. 11724 if (ARM_AM::getSOImmVal(~CVal) != -1) 11725 break; 11726 } 11727 return; 11728 11729 case 'L': 11730 if (Subtarget->isThumb1Only()) { 11731 // This must be a constant between -7 and 7, 11732 // for 3-operand ADD/SUB immediate instructions. 11733 if (CVal >= -7 && CVal < 7) 11734 break; 11735 } else if (Subtarget->isThumb2()) { 11736 // A constant whose negation can be used as an immediate value in a 11737 // data-processing instruction. This can be used in GCC with an "n" 11738 // modifier that prints the negated value, for use with SUB 11739 // instructions. It is not useful otherwise but is implemented for 11740 // compatibility. 11741 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 11742 break; 11743 } else { 11744 // A constant whose negation can be used as an immediate value in a 11745 // data-processing instruction. This can be used in GCC with an "n" 11746 // modifier that prints the negated value, for use with SUB 11747 // instructions. It is not useful otherwise but is implemented for 11748 // compatibility. 11749 if (ARM_AM::getSOImmVal(-CVal) != -1) 11750 break; 11751 } 11752 return; 11753 11754 case 'M': 11755 if (Subtarget->isThumb1Only()) { 11756 // This must be a multiple of 4 between 0 and 1020, for 11757 // ADD sp + immediate. 11758 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 11759 break; 11760 } else { 11761 // A power of two or a constant between 0 and 32. This is used in 11762 // GCC for the shift amount on shifted register operands, but it is 11763 // useful in general for any shift amounts. 11764 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 11765 break; 11766 } 11767 return; 11768 11769 case 'N': 11770 if (Subtarget->isThumb()) { // FIXME thumb2 11771 // This must be a constant between 0 and 31, for shift amounts. 11772 if (CVal >= 0 && CVal <= 31) 11773 break; 11774 } 11775 return; 11776 11777 case 'O': 11778 if (Subtarget->isThumb()) { // FIXME thumb2 11779 // This must be a multiple of 4 between -508 and 508, for 11780 // ADD/SUB sp = sp + immediate. 
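// (Thumb1 encodes this as a 7-bit immediate scaled by 4.)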
11781 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 11782 break; 11783 } 11784 return; 11785 } 11786 Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); 11787 break; 11788 } 11789 11790 if (Result.getNode()) { 11791 Ops.push_back(Result); 11792 return; 11793 } 11794 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 11795 } 11796 11797 static RTLIB::Libcall getDivRemLibcall( 11798 const SDNode *N, MVT::SimpleValueType SVT) { 11799 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || 11800 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && 11801 "Unhandled Opcode in getDivRemLibcall"); 11802 bool isSigned = N->getOpcode() == ISD::SDIVREM || 11803 N->getOpcode() == ISD::SREM; 11804 RTLIB::Libcall LC; 11805 switch (SVT) { 11806 default: llvm_unreachable("Unexpected request for libcall!"); 11807 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 11808 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 11809 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 11810 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 11811 } 11812 return LC; 11813 } 11814 11815 static TargetLowering::ArgListTy getDivRemArgList( 11816 const SDNode *N, LLVMContext *Context) { 11817 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || 11818 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && 11819 "Unhandled Opcode in getDivRemArgList"); 11820 bool isSigned = N->getOpcode() == ISD::SDIVREM || 11821 N->getOpcode() == ISD::SREM; 11822 TargetLowering::ArgListTy Args; 11823 TargetLowering::ArgListEntry Entry; 11824 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 11825 EVT ArgVT = N->getOperand(i).getValueType(); 11826 Type *ArgTy = ArgVT.getTypeForEVT(*Context); 11827 Entry.Node = N->getOperand(i); 11828 Entry.Ty = ArgTy; 11829 Entry.isSExt = isSigned; 11830 Entry.isZExt = !isSigned; 11831 Args.push_back(Entry); 11832 } 11833 return Args; 11834 } 11835 11836 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { 11837 assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || 11838 Subtarget->isTargetGNUAEABI()) && 11839 "Register-based DivRem lowering only"); 11840 unsigned Opcode = Op->getOpcode(); 11841 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && 11842 "Invalid opcode for Div/Rem lowering"); 11843 bool isSigned = (Opcode == ISD::SDIVREM); 11844 EVT VT = Op->getValueType(0); 11845 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 11846 11847 RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), 11848 VT.getSimpleVT().SimpleTy); 11849 SDValue InChain = DAG.getEntryNode(); 11850 11851 TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), 11852 DAG.getContext()); 11853 11854 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 11855 getPointerTy(DAG.getDataLayout())); 11856 11857 Type *RetTy = (Type*)StructType::get(Ty, Ty, nullptr); 11858 11859 SDLoc dl(Op); 11860 TargetLowering::CallLoweringInfo CLI(DAG); 11861 CLI.setDebugLoc(dl).setChain(InChain) 11862 .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0) 11863 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); 11864 11865 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 11866 return CallInfo.first; 11867 } 11868 11869 // Lowers REM using divmod helpers 11870 // see RTABI section 4.2/4.3 11871 SDValue 
ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { 11872 // Build return types (div and rem) 11873 std::vector<Type*> RetTyParams; 11874 Type *RetTyElement; 11875 11876 switch (N->getValueType(0).getSimpleVT().SimpleTy) { 11877 default: llvm_unreachable("Unexpected request for libcall!"); 11878 case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; 11879 case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; 11880 case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; 11881 case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; 11882 } 11883 11884 RetTyParams.push_back(RetTyElement); 11885 RetTyParams.push_back(RetTyElement); 11886 ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); 11887 Type *RetTy = StructType::get(*DAG.getContext(), ret); 11888 11889 RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). 11890 SimpleTy); 11891 SDValue InChain = DAG.getEntryNode(); 11892 TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext()); 11893 bool isSigned = N->getOpcode() == ISD::SREM; 11894 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 11895 getPointerTy(DAG.getDataLayout())); 11896 11897 // Lower call 11898 CallLoweringInfo CLI(DAG); 11899 CLI.setChain(InChain) 11900 .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args), 0) 11901 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); 11902 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 11903 11904 // Return second (rem) result operand (first contains div) 11905 SDNode *ResNode = CallResult.first.getNode(); 11906 assert(ResNode->getNumOperands() == 2 && "divmod should return two operands"); 11907 return ResNode->getOperand(1); 11908 } 11909 11910 SDValue 11911 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 11912 assert(Subtarget->isTargetWindows() && "unsupported target platform"); 11913 SDLoc DL(Op); 11914 11915 // Get the inputs. 
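  // The allocation size is passed to the Windows stack probe in R4 as a
  // number of 4-byte words (hence the SRL by 2 below); the adjusted SP is
  // then read back once the probe has run.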
11916 SDValue Chain = Op.getOperand(0); 11917 SDValue Size = Op.getOperand(1); 11918 11919 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, 11920 DAG.getConstant(2, DL, MVT::i32)); 11921 11922 SDValue Flag; 11923 Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); 11924 Flag = Chain.getValue(1); 11925 11926 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 11927 Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); 11928 11929 SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); 11930 Chain = NewSP.getValue(1); 11931 11932 SDValue Ops[2] = { NewSP, Chain }; 11933 return DAG.getMergeValues(Ops, DL); 11934 } 11935 11936 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 11937 assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() && 11938 "Unexpected type for custom-lowering FP_EXTEND"); 11939 11940 RTLIB::Libcall LC; 11941 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); 11942 11943 SDValue SrcVal = Op.getOperand(0); 11944 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, 11945 SDLoc(Op)).first; 11946 } 11947 11948 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 11949 assert(Op.getOperand(0).getValueType() == MVT::f64 && 11950 Subtarget->isFPOnlySP() && 11951 "Unexpected type for custom-lowering FP_ROUND"); 11952 11953 RTLIB::Libcall LC; 11954 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); 11955 11956 SDValue SrcVal = Op.getOperand(0); 11957 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, 11958 SDLoc(Op)).first; 11959 } 11960 11961 bool 11962 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 11963 // The ARM target isn't yet aware of offsets. 11964 return false; 11965 } 11966 11967 bool ARM::isBitFieldInvertedMask(unsigned v) { 11968 if (v == 0xffffffff) 11969 return false; 11970 11971 // there can be 1's on either or both "outsides", all the "inside" 11972 // bits must be 0's 11973 return isShiftedMask_32(~v); 11974 } 11975 11976 /// isFPImmLegal - Returns true if the target can instruction select the 11977 /// specified FP immediate natively. If false, the legalizer will 11978 /// materialize the FP immediate as a load from a constant pool. 11979 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 11980 if (!Subtarget->hasVFP3()) 11981 return false; 11982 if (VT == MVT::f32) 11983 return ARM_AM::getFP32Imm(Imm) != -1; 11984 if (VT == MVT::f64 && !Subtarget->isFPOnlySP()) 11985 return ARM_AM::getFP64Imm(Imm) != -1; 11986 return false; 11987 } 11988 11989 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 11990 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 11991 /// specified in the intrinsic calls. 11992 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 11993 const CallInst &I, 11994 unsigned Intrinsic) const { 11995 switch (Intrinsic) { 11996 case Intrinsic::arm_neon_vld1: 11997 case Intrinsic::arm_neon_vld2: 11998 case Intrinsic::arm_neon_vld3: 11999 case Intrinsic::arm_neon_vld4: 12000 case Intrinsic::arm_neon_vld2lane: 12001 case Intrinsic::arm_neon_vld3lane: 12002 case Intrinsic::arm_neon_vld4lane: { 12003 Info.opc = ISD::INTRINSIC_W_CHAIN; 12004 // Conservatively set memVT to the entire set of vectors loaded. 
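    // For example, a vld2 returning { <4 x i32>, <4 x i32> } is modeled as a
    // single 256-bit (v4i64) access starting at the pointer operand.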
12005 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12006 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; 12007 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 12008 Info.ptrVal = I.getArgOperand(0); 12009 Info.offset = 0; 12010 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 12011 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 12012 Info.vol = false; // volatile loads with NEON intrinsics not supported 12013 Info.readMem = true; 12014 Info.writeMem = false; 12015 return true; 12016 } 12017 case Intrinsic::arm_neon_vst1: 12018 case Intrinsic::arm_neon_vst2: 12019 case Intrinsic::arm_neon_vst3: 12020 case Intrinsic::arm_neon_vst4: 12021 case Intrinsic::arm_neon_vst2lane: 12022 case Intrinsic::arm_neon_vst3lane: 12023 case Intrinsic::arm_neon_vst4lane: { 12024 Info.opc = ISD::INTRINSIC_VOID; 12025 // Conservatively set memVT to the entire set of vectors stored. 12026 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12027 unsigned NumElts = 0; 12028 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 12029 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 12030 if (!ArgTy->isVectorTy()) 12031 break; 12032 NumElts += DL.getTypeSizeInBits(ArgTy) / 64; 12033 } 12034 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 12035 Info.ptrVal = I.getArgOperand(0); 12036 Info.offset = 0; 12037 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 12038 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 12039 Info.vol = false; // volatile stores with NEON intrinsics not supported 12040 Info.readMem = false; 12041 Info.writeMem = true; 12042 return true; 12043 } 12044 case Intrinsic::arm_ldaex: 12045 case Intrinsic::arm_ldrex: { 12046 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12047 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 12048 Info.opc = ISD::INTRINSIC_W_CHAIN; 12049 Info.memVT = MVT::getVT(PtrTy->getElementType()); 12050 Info.ptrVal = I.getArgOperand(0); 12051 Info.offset = 0; 12052 Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); 12053 Info.vol = true; 12054 Info.readMem = true; 12055 Info.writeMem = false; 12056 return true; 12057 } 12058 case Intrinsic::arm_stlex: 12059 case Intrinsic::arm_strex: { 12060 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12061 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); 12062 Info.opc = ISD::INTRINSIC_W_CHAIN; 12063 Info.memVT = MVT::getVT(PtrTy->getElementType()); 12064 Info.ptrVal = I.getArgOperand(1); 12065 Info.offset = 0; 12066 Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); 12067 Info.vol = true; 12068 Info.readMem = false; 12069 Info.writeMem = true; 12070 return true; 12071 } 12072 case Intrinsic::arm_stlexd: 12073 case Intrinsic::arm_strexd: { 12074 Info.opc = ISD::INTRINSIC_W_CHAIN; 12075 Info.memVT = MVT::i64; 12076 Info.ptrVal = I.getArgOperand(2); 12077 Info.offset = 0; 12078 Info.align = 8; 12079 Info.vol = true; 12080 Info.readMem = false; 12081 Info.writeMem = true; 12082 return true; 12083 } 12084 case Intrinsic::arm_ldaexd: 12085 case Intrinsic::arm_ldrexd: { 12086 Info.opc = ISD::INTRINSIC_W_CHAIN; 12087 Info.memVT = MVT::i64; 12088 Info.ptrVal = I.getArgOperand(0); 12089 Info.offset = 0; 12090 Info.align = 8; 12091 Info.vol = true; 12092 Info.readMem = true; 12093 Info.writeMem = false; 12094 return true; 12095 } 12096 default: 12097 break; 12098 } 12099 12100 return false; 
12101 } 12102 12103 /// \brief Returns true if it is beneficial to convert a load of a constant 12104 /// to just the constant itself. 12105 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 12106 Type *Ty) const { 12107 assert(Ty->isIntegerTy()); 12108 12109 unsigned Bits = Ty->getPrimitiveSizeInBits(); 12110 if (Bits == 0 || Bits > 32) 12111 return false; 12112 return true; 12113 } 12114 12115 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, 12116 ARM_MB::MemBOpt Domain) const { 12117 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12118 12119 // First, if the target has no DMB, see what fallback we can use. 12120 if (!Subtarget->hasDataBarrier()) { 12121 // Some ARMv6 cpus can support data barriers with an mcr instruction. 12122 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 12123 // here. 12124 if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { 12125 Function *MCR = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); 12126 Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), 12127 Builder.getInt32(0), Builder.getInt32(7), 12128 Builder.getInt32(10), Builder.getInt32(5)}; 12129 return Builder.CreateCall(MCR, args); 12130 } else { 12131 // Instead of using barriers, atomic accesses on these subtargets use 12132 // libcalls. 12133 llvm_unreachable("makeDMB on a target so old that it has no barriers"); 12134 } 12135 } else { 12136 Function *DMB = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); 12137 // Only a full system barrier exists in the M-class architectures. 12138 Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; 12139 Constant *CDomain = Builder.getInt32(Domain); 12140 return Builder.CreateCall(DMB, CDomain); 12141 } 12142 } 12143 12144 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 12145 Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 12146 AtomicOrdering Ord, bool IsStore, 12147 bool IsLoad) const { 12148 switch (Ord) { 12149 case AtomicOrdering::NotAtomic: 12150 case AtomicOrdering::Unordered: 12151 llvm_unreachable("Invalid fence: unordered/non-atomic"); 12152 case AtomicOrdering::Monotonic: 12153 case AtomicOrdering::Acquire: 12154 return nullptr; // Nothing to do 12155 case AtomicOrdering::SequentiallyConsistent: 12156 if (!IsStore) 12157 return nullptr; // Nothing to do 12158 /*FALLTHROUGH*/ 12159 case AtomicOrdering::Release: 12160 case AtomicOrdering::AcquireRelease: 12161 if (Subtarget->isSwift()) 12162 return makeDMB(Builder, ARM_MB::ISHST); 12163 // FIXME: add a comment with a link to documentation justifying this. 
12164 else 12165 return makeDMB(Builder, ARM_MB::ISH); 12166 } 12167 llvm_unreachable("Unknown fence ordering in emitLeadingFence"); 12168 } 12169 12170 Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 12171 AtomicOrdering Ord, bool IsStore, 12172 bool IsLoad) const { 12173 switch (Ord) { 12174 case AtomicOrdering::NotAtomic: 12175 case AtomicOrdering::Unordered: 12176 llvm_unreachable("Invalid fence: unordered/not-atomic"); 12177 case AtomicOrdering::Monotonic: 12178 case AtomicOrdering::Release: 12179 return nullptr; // Nothing to do 12180 case AtomicOrdering::Acquire: 12181 case AtomicOrdering::AcquireRelease: 12182 case AtomicOrdering::SequentiallyConsistent: 12183 return makeDMB(Builder, ARM_MB::ISH); 12184 } 12185 llvm_unreachable("Unknown fence ordering in emitTrailingFence"); 12186 } 12187 12188 // Loads and stores less than 64-bits are already atomic; ones above that 12189 // are doomed anyway, so defer to the default libcall and blame the OS when 12190 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit 12191 // anything for those. 12192 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { 12193 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); 12194 return (Size == 64) && !Subtarget->isMClass(); 12195 } 12196 12197 // Loads and stores less than 64-bits are already atomic; ones above that 12198 // are doomed anyway, so defer to the default libcall and blame the OS when 12199 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit 12200 // anything for those. 12201 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that 12202 // guarantee, see DDI0406C ARM architecture reference manual, 12203 // sections A8.8.72-74 LDRD) 12204 TargetLowering::AtomicExpansionKind 12205 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { 12206 unsigned Size = LI->getType()->getPrimitiveSizeInBits(); 12207 return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly 12208 : AtomicExpansionKind::None; 12209 } 12210 12211 // For the real atomic operations, we have ldrex/strex up to 32 bits, 12212 // and up to 64 bits on the non-M profiles 12213 TargetLowering::AtomicExpansionKind 12214 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 12215 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 12216 return (Size <= (Subtarget->isMClass() ? 32U : 64U)) 12217 ? AtomicExpansionKind::LLSC 12218 : AtomicExpansionKind::None; 12219 } 12220 12221 bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR( 12222 AtomicCmpXchgInst *AI) const { 12223 // At -O0, fast-regalloc cannot cope with the live vregs necessary to 12224 // implement cmpxchg without spilling. If the address being exchanged is also 12225 // on the stack and close enough to the spill slot, this can lead to a 12226 // situation where the monitor always gets cleared and the atomic operation 12227 // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. 12228 return getTargetMachine().getOptLevel() != 0; 12229 } 12230 12231 bool ARMTargetLowering::shouldInsertFencesForAtomic( 12232 const Instruction *I) const { 12233 return InsertFencesForAtomic; 12234 } 12235 12236 // This has so far only been implemented for MachO. 
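// Other targets use the generic stack protector lowering, which loads the
// guard value with an ordinary load.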
12237 bool ARMTargetLowering::useLoadStackGuardNode() const { 12238 return Subtarget->isTargetMachO(); 12239 } 12240 12241 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, 12242 unsigned &Cost) const { 12243 // If we do not have NEON, vector types are not natively supported. 12244 if (!Subtarget->hasNEON()) 12245 return false; 12246 12247 // Floating point values and vector values map to the same register file. 12248 // Therefore, although we could do a store extract of a vector type, this is 12249 // better to leave at float as we have more freedom in the addressing mode for 12250 // those. 12251 if (VectorTy->isFPOrFPVectorTy()) 12252 return false; 12253 12254 // If the index is unknown at compile time, this is very expensive to lower 12255 // and it is not possible to combine the store with the extract. 12256 if (!isa<ConstantInt>(Idx)) 12257 return false; 12258 12259 assert(VectorTy->isVectorTy() && "VectorTy is not a vector type"); 12260 unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth(); 12261 // We can do a store + vector extract on any vector that fits perfectly in a D 12262 // or Q register. 12263 if (BitWidth == 64 || BitWidth == 128) { 12264 Cost = 0; 12265 return true; 12266 } 12267 return false; 12268 } 12269 12270 bool ARMTargetLowering::isCheapToSpeculateCttz() const { 12271 return Subtarget->hasV6T2Ops(); 12272 } 12273 12274 bool ARMTargetLowering::isCheapToSpeculateCtlz() const { 12275 return Subtarget->hasV6T2Ops(); 12276 } 12277 12278 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, 12279 AtomicOrdering Ord) const { 12280 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12281 Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); 12282 bool IsAcquire = isAcquireOrStronger(Ord); 12283 12284 // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd 12285 // intrinsic must return {i32, i32} and we have to recombine them into a 12286 // single i64 here. 12287 if (ValTy->getPrimitiveSizeInBits() == 64) { 12288 Intrinsic::ID Int = 12289 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; 12290 Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int); 12291 12292 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); 12293 Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi"); 12294 12295 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); 12296 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); 12297 if (!Subtarget->isLittle()) 12298 std::swap (Lo, Hi); 12299 Lo = Builder.CreateZExt(Lo, ValTy, "lo64"); 12300 Hi = Builder.CreateZExt(Hi, ValTy, "hi64"); 12301 return Builder.CreateOr( 12302 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64"); 12303 } 12304 12305 Type *Tys[] = { Addr->getType() }; 12306 Intrinsic::ID Int = IsAcquire ? 
Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; 12307 Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys); 12308 12309 return Builder.CreateTruncOrBitCast( 12310 Builder.CreateCall(Ldrex, Addr), 12311 cast<PointerType>(Addr->getType())->getElementType()); 12312 } 12313 12314 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( 12315 IRBuilder<> &Builder) const { 12316 if (!Subtarget->hasV7Ops()) 12317 return; 12318 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12319 Builder.CreateCall(llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_clrex)); 12320 } 12321 12322 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, 12323 Value *Addr, 12324 AtomicOrdering Ord) const { 12325 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12326 bool IsRelease = isReleaseOrStronger(Ord); 12327 12328 // Since the intrinsics must have legal type, the i64 intrinsics take two 12329 // parameters: "i32, i32". We must marshal Val into the appropriate form 12330 // before the call. 12331 if (Val->getType()->getPrimitiveSizeInBits() == 64) { 12332 Intrinsic::ID Int = 12333 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; 12334 Function *Strex = Intrinsic::getDeclaration(M, Int); 12335 Type *Int32Ty = Type::getInt32Ty(M->getContext()); 12336 12337 Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo"); 12338 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi"); 12339 if (!Subtarget->isLittle()) 12340 std::swap (Lo, Hi); 12341 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); 12342 return Builder.CreateCall(Strex, {Lo, Hi, Addr}); 12343 } 12344 12345 Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex; 12346 Type *Tys[] = { Addr->getType() }; 12347 Function *Strex = Intrinsic::getDeclaration(M, Int, Tys); 12348 12349 return Builder.CreateCall( 12350 Strex, {Builder.CreateZExtOrBitCast( 12351 Val, Strex->getFunctionType()->getParamType(0)), 12352 Addr}); 12353 } 12354 12355 /// \brief Lower an interleaved load into a vldN intrinsic. 12356 /// 12357 /// E.g. Lower an interleaved load (Factor = 2): 12358 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 12359 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements 12360 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements 12361 /// 12362 /// Into: 12363 /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) 12364 /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0 12365 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1 12366 bool ARMTargetLowering::lowerInterleavedLoad( 12367 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, 12368 ArrayRef<unsigned> Indices, unsigned Factor) const { 12369 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 12370 "Invalid interleave factor"); 12371 assert(!Shuffles.empty() && "Empty shufflevector input"); 12372 assert(Shuffles.size() == Indices.size() && 12373 "Unmatched number of shufflevectors and indices"); 12374 12375 VectorType *VecTy = Shuffles[0]->getType(); 12376 Type *EltTy = VecTy->getVectorElementType(); 12377 12378 const DataLayout &DL = LI->getModule()->getDataLayout(); 12379 unsigned VecSize = DL.getTypeSizeInBits(VecTy); 12380 bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64; 12381 12382 // Skip if we do not have NEON and skip illegal vector types and vector types 12383 // with i64/f64 elements (vldN doesn't support i64/f64 elements). 
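  // For example, <2 x i64> and <2 x double> are rejected here even though
  // they are 128 bits wide, since there is no 64-bit-element vldN.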
12384 if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128) || EltIs64Bits) 12385 return false; 12386 12387 // A pointer vector can not be the return type of the ldN intrinsics. Need to 12388 // load integer vectors first and then convert to pointer vectors. 12389 if (EltTy->isPointerTy()) 12390 VecTy = 12391 VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); 12392 12393 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, 12394 Intrinsic::arm_neon_vld3, 12395 Intrinsic::arm_neon_vld4}; 12396 12397 IRBuilder<> Builder(LI); 12398 SmallVector<Value *, 2> Ops; 12399 12400 Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); 12401 Ops.push_back(Builder.CreateBitCast(LI->getPointerOperand(), Int8Ptr)); 12402 Ops.push_back(Builder.getInt32(LI->getAlignment())); 12403 12404 Type *Tys[] = { VecTy, Int8Ptr }; 12405 Function *VldnFunc = 12406 Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); 12407 CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN"); 12408 12409 // Replace uses of each shufflevector with the corresponding vector loaded 12410 // by ldN. 12411 for (unsigned i = 0; i < Shuffles.size(); i++) { 12412 ShuffleVectorInst *SV = Shuffles[i]; 12413 unsigned Index = Indices[i]; 12414 12415 Value *SubVec = Builder.CreateExtractValue(VldN, Index); 12416 12417 // Convert the integer vector to pointer vector if the element is pointer. 12418 if (EltTy->isPointerTy()) 12419 SubVec = Builder.CreateIntToPtr(SubVec, SV->getType()); 12420 12421 SV->replaceAllUsesWith(SubVec); 12422 } 12423 12424 return true; 12425 } 12426 12427 /// \brief Get a mask consisting of sequential integers starting from \p Start. 12428 /// 12429 /// I.e. <Start, Start + 1, ..., Start + NumElts - 1> 12430 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start, 12431 unsigned NumElts) { 12432 SmallVector<Constant *, 16> Mask; 12433 for (unsigned i = 0; i < NumElts; i++) 12434 Mask.push_back(Builder.getInt32(Start + i)); 12435 12436 return ConstantVector::get(Mask); 12437 } 12438 12439 /// \brief Lower an interleaved store into a vstN intrinsic. 12440 /// 12441 /// E.g. Lower an interleaved store (Factor = 3): 12442 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, 12443 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> 12444 /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 12445 /// 12446 /// Into: 12447 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> 12448 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> 12449 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> 12450 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) 12451 /// 12452 /// Note that the new shufflevectors will be removed and we'll only generate one 12453 /// vst3 instruction in CodeGen. 
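/// As with the interleaved-load path, the sub-vector width must be 64 or 128
/// bits so that each piece maps onto a single D or Q register.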
12454 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, 12455 ShuffleVectorInst *SVI, 12456 unsigned Factor) const { 12457 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 12458 "Invalid interleave factor"); 12459 12460 VectorType *VecTy = SVI->getType(); 12461 assert(VecTy->getVectorNumElements() % Factor == 0 && 12462 "Invalid interleaved store"); 12463 12464 unsigned NumSubElts = VecTy->getVectorNumElements() / Factor; 12465 Type *EltTy = VecTy->getVectorElementType(); 12466 VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts); 12467 12468 const DataLayout &DL = SI->getModule()->getDataLayout(); 12469 unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy); 12470 bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64; 12471 12472 // Skip if we do not have NEON and skip illegal vector types and vector types 12473 // with i64/f64 elements (vstN doesn't support i64/f64 elements). 12474 if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128) || 12475 EltIs64Bits) 12476 return false; 12477 12478 Value *Op0 = SVI->getOperand(0); 12479 Value *Op1 = SVI->getOperand(1); 12480 IRBuilder<> Builder(SI); 12481 12482 // StN intrinsics don't support pointer vectors as arguments. Convert pointer 12483 // vectors to integer vectors. 12484 if (EltTy->isPointerTy()) { 12485 Type *IntTy = DL.getIntPtrType(EltTy); 12486 12487 // Convert to the corresponding integer vector. 12488 Type *IntVecTy = 12489 VectorType::get(IntTy, Op0->getType()->getVectorNumElements()); 12490 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); 12491 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); 12492 12493 SubVecTy = VectorType::get(IntTy, NumSubElts); 12494 } 12495 12496 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, 12497 Intrinsic::arm_neon_vst3, 12498 Intrinsic::arm_neon_vst4}; 12499 SmallVector<Value *, 6> Ops; 12500 12501 Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); 12502 Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), Int8Ptr)); 12503 12504 Type *Tys[] = { Int8Ptr, SubVecTy }; 12505 Function *VstNFunc = Intrinsic::getDeclaration( 12506 SI->getModule(), StoreInts[Factor - 2], Tys); 12507 12508 // Split the shufflevector operands into sub vectors for the new vstN call. 
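  // With Factor == 3 and NumSubElts == 4 this produces the masks <0,1,2,3>,
  // <4,5,6,7> and <8,9,10,11>, matching the example in the comment above.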
12509 for (unsigned i = 0; i < Factor; i++) 12510 Ops.push_back(Builder.CreateShuffleVector( 12511 Op0, Op1, getSequentialMask(Builder, NumSubElts * i, NumSubElts))); 12512 12513 Ops.push_back(Builder.getInt32(SI->getAlignment())); 12514 Builder.CreateCall(VstNFunc, Ops); 12515 return true; 12516 } 12517 12518 enum HABaseType { 12519 HA_UNKNOWN = 0, 12520 HA_FLOAT, 12521 HA_DOUBLE, 12522 HA_VECT64, 12523 HA_VECT128 12524 }; 12525 12526 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, 12527 uint64_t &Members) { 12528 if (auto *ST = dyn_cast<StructType>(Ty)) { 12529 for (unsigned i = 0; i < ST->getNumElements(); ++i) { 12530 uint64_t SubMembers = 0; 12531 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) 12532 return false; 12533 Members += SubMembers; 12534 } 12535 } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { 12536 uint64_t SubMembers = 0; 12537 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) 12538 return false; 12539 Members += SubMembers * AT->getNumElements(); 12540 } else if (Ty->isFloatTy()) { 12541 if (Base != HA_UNKNOWN && Base != HA_FLOAT) 12542 return false; 12543 Members = 1; 12544 Base = HA_FLOAT; 12545 } else if (Ty->isDoubleTy()) { 12546 if (Base != HA_UNKNOWN && Base != HA_DOUBLE) 12547 return false; 12548 Members = 1; 12549 Base = HA_DOUBLE; 12550 } else if (auto *VT = dyn_cast<VectorType>(Ty)) { 12551 Members = 1; 12552 switch (Base) { 12553 case HA_FLOAT: 12554 case HA_DOUBLE: 12555 return false; 12556 case HA_VECT64: 12557 return VT->getBitWidth() == 64; 12558 case HA_VECT128: 12559 return VT->getBitWidth() == 128; 12560 case HA_UNKNOWN: 12561 switch (VT->getBitWidth()) { 12562 case 64: 12563 Base = HA_VECT64; 12564 return true; 12565 case 128: 12566 Base = HA_VECT128; 12567 return true; 12568 default: 12569 return false; 12570 } 12571 } 12572 } 12573 12574 return (Members > 0 && Members <= 4); 12575 } 12576 12577 /// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of 12578 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when 12579 /// passing according to AAPCS rules. 12580 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( 12581 Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { 12582 if (getEffectiveCallingConv(CallConv, isVarArg) != 12583 CallingConv::ARM_AAPCS_VFP) 12584 return false; 12585 12586 HABaseType Base = HA_UNKNOWN; 12587 uint64_t Members = 0; 12588 bool IsHA = isHomogeneousAggregate(Ty, Base, Members); 12589 DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump()); 12590 12591 bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); 12592 return IsHA || IsIntArray; 12593 } 12594 12595 unsigned ARMTargetLowering::getExceptionPointerRegister( 12596 const Constant *PersonalityFn) const { 12597 // Platforms which do not use SjLj EH may return values in these registers 12598 // via the personality function. 12599 return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0; 12600 } 12601 12602 unsigned ARMTargetLowering::getExceptionSelectorRegister( 12603 const Constant *PersonalityFn) const { 12604 // Platforms which do not use SjLj EH may return values in these registers 12605 // via the personality function. 12606 return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1; 12607 } 12608 12609 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 12610 // Update IsSplitCSR in ARMFunctionInfo. 
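  // Split CSR handling is used for the CXX_FAST_TLS calling convention; the
  // actual copies are emitted in insertCopiesSplitCSR below.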
12611 ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); 12612 AFI->setIsSplitCSR(true); 12613 } 12614 12615 void ARMTargetLowering::insertCopiesSplitCSR( 12616 MachineBasicBlock *Entry, 12617 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 12618 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 12619 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 12620 if (!IStart) 12621 return; 12622 12623 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 12624 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 12625 MachineBasicBlock::iterator MBBI = Entry->begin(); 12626 for (const MCPhysReg *I = IStart; *I; ++I) { 12627 const TargetRegisterClass *RC = nullptr; 12628 if (ARM::GPRRegClass.contains(*I)) 12629 RC = &ARM::GPRRegClass; 12630 else if (ARM::DPRRegClass.contains(*I)) 12631 RC = &ARM::DPRRegClass; 12632 else 12633 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 12634 12635 unsigned NewVR = MRI->createVirtualRegister(RC); 12636 // Create copy from CSR to a virtual register. 12637 // FIXME: this currently does not emit CFI pseudo-instructions, it works 12638 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 12639 // nounwind. If we want to generalize this later, we may need to emit 12640 // CFI pseudo-instructions. 12641 assert(Entry->getParent()->getFunction()->hasFnAttribute( 12642 Attribute::NoUnwind) && 12643 "Function should be nounwind in insertCopiesSplitCSR!"); 12644 Entry->addLiveIn(*I); 12645 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 12646 .addReg(*I); 12647 12648 // Insert the copy-back instructions right before the terminator. 12649 for (auto *Exit : Exits) 12650 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 12651 TII->get(TargetOpcode::COPY), *I) 12652 .addReg(NewVR); 12653 } 12654 } 12655