1 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the interfaces that ARM uses to lower LLVM code into a 11 // selection DAG. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "ARMISelLowering.h" 16 #include "ARMCallingConv.h" 17 #include "ARMConstantPoolValue.h" 18 #include "ARMMachineFunctionInfo.h" 19 #include "ARMPerfectShuffle.h" 20 #include "ARMSubtarget.h" 21 #include "ARMTargetMachine.h" 22 #include "ARMTargetObjectFile.h" 23 #include "MCTargetDesc/ARMAddressingModes.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/ADT/StringExtras.h" 26 #include "llvm/ADT/StringSwitch.h" 27 #include "llvm/CodeGen/Analysis.h" 28 #include "llvm/CodeGen/CallingConvLower.h" 29 #include "llvm/CodeGen/IntrinsicLowering.h" 30 #include "llvm/CodeGen/MachineBasicBlock.h" 31 #include "llvm/CodeGen/MachineFrameInfo.h" 32 #include "llvm/CodeGen/MachineFunction.h" 33 #include "llvm/CodeGen/MachineInstrBuilder.h" 34 #include "llvm/CodeGen/MachineJumpTableInfo.h" 35 #include "llvm/CodeGen/MachineModuleInfo.h" 36 #include "llvm/CodeGen/MachineRegisterInfo.h" 37 #include "llvm/CodeGen/SelectionDAG.h" 38 #include "llvm/IR/CallingConv.h" 39 #include "llvm/IR/Constants.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/GlobalValue.h" 42 #include "llvm/IR/IRBuilder.h" 43 #include "llvm/IR/Instruction.h" 44 #include "llvm/IR/Instructions.h" 45 #include "llvm/IR/IntrinsicInst.h" 46 #include "llvm/IR/Intrinsics.h" 47 #include "llvm/IR/Type.h" 48 #include "llvm/MC/MCSectionMachO.h" 49 #include "llvm/Support/CommandLine.h" 50 #include "llvm/Support/Debug.h" 51 #include "llvm/Support/ErrorHandling.h" 52 #include 
"llvm/Support/MathExtras.h" 53 #include "llvm/Support/raw_ostream.h" 54 #include "llvm/Target/TargetOptions.h" 55 #include <utility> 56 using namespace llvm; 57 58 #define DEBUG_TYPE "arm-isel" 59 60 STATISTIC(NumTailCalls, "Number of tail calls"); 61 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt"); 62 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments"); 63 64 static cl::opt<bool> 65 ARMInterworking("arm-interworking", cl::Hidden, 66 cl::desc("Enable / disable ARM interworking (for debugging only)"), 67 cl::init(true)); 68 69 namespace { 70 class ARMCCState : public CCState { 71 public: 72 ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF, 73 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C, 74 ParmContext PC) 75 : CCState(CC, isVarArg, MF, locs, C) { 76 assert(((PC == Call) || (PC == Prologue)) && 77 "ARMCCState users must specify whether their context is call" 78 "or prologue generation."); 79 CallOrPrologue = PC; 80 } 81 }; 82 } 83 84 // The APCS parameter registers. 
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

/// addTypeForNEON - Configure the SelectionDAG legalization actions for a
/// NEON vector type VT. Loads/stores are promoted to PromotedLdStVT and
/// bit-wise ops to PromotedBitwiseVT when those differ from VT.
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  // Perform loads and stores in the promoted (equivalently-sized) type.
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  // SETCC is custom-lowered except for 64-bit elements.
  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  // int<->fp conversions are custom for i32 elements, expanded otherwise.
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  // Vector shifts by a vector amount are custom-lowered.
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  // Integer min/max are legal, except for 64-bit element types.
  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
}

/// addDRTypeForNEON - Register VT as a 64-bit NEON type living in the D
/// registers, with f64 load/store promotion and v2i32 bit-wise promotion.
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

/// addQRTypeForNEON - Register VT as a 128-bit NEON type living in D-register
/// pairs, with v2f64 load/store promotion and v4i32 bit-wise promotion.
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      // Table of VFP-flavored libcall names (and, for comparisons, the
      // condition used to interpret the helper's integer result).
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
181 { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID }, 182 { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID }, 183 { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID }, 184 { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID }, 185 186 // Double-precision floating-point arithmetic. 187 { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID }, 188 { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID }, 189 { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID }, 190 { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID }, 191 192 // Single-precision comparisons. 193 { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE }, 194 { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE }, 195 { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE }, 196 { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE }, 197 { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE }, 198 { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE }, 199 { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE }, 200 { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ }, 201 202 // Double-precision comparisons. 203 { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE }, 204 { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE }, 205 { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE }, 206 { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE }, 207 { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE }, 208 { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE }, 209 { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE }, 210 { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ }, 211 212 // Floating-point to integer conversions. 213 // i64 conversions are done via library routines even when generating VFP 214 // instructions, so use the same ones. 215 { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID }, 216 { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID }, 217 { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID }, 218 { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID }, 219 220 // Conversions between floating types. 
221 { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID }, 222 { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID }, 223 224 // Integer to floating-point conversions. 225 // i64 conversions are done via library routines even when generating VFP 226 // instructions, so use the same ones. 227 // FIXME: There appears to be some naming inconsistency in ARM libgcc: 228 // e.g., __floatunsidf vs. __floatunssidfvfp. 229 { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID }, 230 { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID }, 231 { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID }, 232 { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID }, 233 }; 234 235 for (const auto &LC : LibraryCalls) { 236 setLibcallName(LC.Op, LC.Name); 237 if (LC.Cond != ISD::SETCC_INVALID) 238 setCmpLibcallCC(LC.Op, LC.Cond); 239 } 240 } 241 242 // Set the correct calling convention for ARMv7k WatchOS. It's just 243 // AAPCS_VFP for functions as simple as libcalls. 244 if (Subtarget->isTargetWatchABI()) { 245 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) 246 setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP); 247 } 248 } 249 250 // These libcalls are not available in 32-bit. 
251 setLibcallName(RTLIB::SHL_I128, nullptr); 252 setLibcallName(RTLIB::SRL_I128, nullptr); 253 setLibcallName(RTLIB::SRA_I128, nullptr); 254 255 // RTLIB 256 if (Subtarget->isAAPCS_ABI() && 257 (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() || 258 Subtarget->isTargetAndroid())) { 259 static const struct { 260 const RTLIB::Libcall Op; 261 const char * const Name; 262 const CallingConv::ID CC; 263 const ISD::CondCode Cond; 264 } LibraryCalls[] = { 265 // Double-precision floating-point arithmetic helper functions 266 // RTABI chapter 4.1.2, Table 2 267 { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 268 { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 269 { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 270 { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 271 272 // Double-precision floating-point comparison helper functions 273 // RTABI chapter 4.1.2, Table 3 274 { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, 275 { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, 276 { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, 277 { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, 278 { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, 279 { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, 280 { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, 281 { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ }, 282 283 // Single-precision floating-point arithmetic helper functions 284 // RTABI chapter 4.1.2, Table 4 285 { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 286 { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 287 { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 288 { 
RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 289 290 // Single-precision floating-point comparison helper functions 291 // RTABI chapter 4.1.2, Table 5 292 { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, 293 { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, 294 { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, 295 { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, 296 { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, 297 { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, 298 { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, 299 { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ }, 300 301 // Floating-point to integer conversions. 302 // RTABI chapter 4.1.2, Table 6 303 { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 304 { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 305 { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 306 { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 307 { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 308 { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 309 { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 310 { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 311 312 // Conversions between floating types. 
313 // RTABI chapter 4.1.2, Table 7 314 { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 315 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 316 { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 317 318 // Integer to floating-point conversions. 319 // RTABI chapter 4.1.2, Table 8 320 { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 321 { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 322 { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 323 { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 324 { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 325 { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 326 { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 327 { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 328 329 // Long long helper functions 330 // RTABI chapter 4.2, Table 9 331 { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 332 { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 333 { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 334 { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 335 336 // Integer division functions 337 // RTABI chapter 4.3.1 338 { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 339 { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 340 { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 341 { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 342 { RTLIB::UDIV_I8, "__aeabi_uidiv", 
CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 343 { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 344 { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 345 { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 346 }; 347 348 for (const auto &LC : LibraryCalls) { 349 setLibcallName(LC.Op, LC.Name); 350 setLibcallCallingConv(LC.Op, LC.CC); 351 if (LC.Cond != ISD::SETCC_INVALID) 352 setCmpLibcallCC(LC.Op, LC.Cond); 353 } 354 355 // EABI dependent RTLIB 356 if (TM.Options.EABIVersion == EABI::EABI4 || 357 TM.Options.EABIVersion == EABI::EABI5) { 358 static const struct { 359 const RTLIB::Libcall Op; 360 const char *const Name; 361 const CallingConv::ID CC; 362 const ISD::CondCode Cond; 363 } MemOpsLibraryCalls[] = { 364 // Memory operations 365 // RTABI chapter 4.3.4 366 { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 367 { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 368 { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 369 }; 370 371 for (const auto &LC : MemOpsLibraryCalls) { 372 setLibcallName(LC.Op, LC.Name); 373 setLibcallCallingConv(LC.Op, LC.CC); 374 if (LC.Cond != ISD::SETCC_INVALID) 375 setCmpLibcallCC(LC.Op, LC.Cond); 376 } 377 } 378 } 379 380 if (Subtarget->isTargetWindows()) { 381 static const struct { 382 const RTLIB::Libcall Op; 383 const char * const Name; 384 const CallingConv::ID CC; 385 } LibraryCalls[] = { 386 { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP }, 387 { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP }, 388 { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP }, 389 { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP }, 390 { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP }, 391 { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP }, 392 { 
RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP }, 393 { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP }, 394 }; 395 396 for (const auto &LC : LibraryCalls) { 397 setLibcallName(LC.Op, LC.Name); 398 setLibcallCallingConv(LC.Op, LC.CC); 399 } 400 } 401 402 // Use divmod compiler-rt calls for iOS 5.0 and later. 403 if (Subtarget->isTargetWatchOS() || 404 (Subtarget->isTargetIOS() && 405 !Subtarget->getTargetTriple().isOSVersionLT(5, 0))) { 406 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); 407 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); 408 } 409 410 // The half <-> float conversion functions are always soft-float, but are 411 // needed for some targets which use a hard-float calling convention by 412 // default. 413 if (Subtarget->isAAPCS_ABI()) { 414 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS); 415 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS); 416 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS); 417 } else { 418 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS); 419 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS); 420 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS); 421 } 422 423 // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have 424 // a __gnu_ prefix (which is the default). 
425 if (Subtarget->isTargetAEABI()) { 426 setLibcallName(RTLIB::FPROUND_F32_F16, "__aeabi_f2h"); 427 setLibcallName(RTLIB::FPROUND_F64_F16, "__aeabi_d2h"); 428 setLibcallName(RTLIB::FPEXT_F16_F32, "__aeabi_h2f"); 429 } 430 431 if (Subtarget->isThumb1Only()) 432 addRegisterClass(MVT::i32, &ARM::tGPRRegClass); 433 else 434 addRegisterClass(MVT::i32, &ARM::GPRRegClass); 435 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && 436 !Subtarget->isThumb1Only()) { 437 addRegisterClass(MVT::f32, &ARM::SPRRegClass); 438 addRegisterClass(MVT::f64, &ARM::DPRRegClass); 439 } 440 441 for (MVT VT : MVT::vector_valuetypes()) { 442 for (MVT InnerVT : MVT::vector_valuetypes()) { 443 setTruncStoreAction(VT, InnerVT, Expand); 444 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); 445 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); 446 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); 447 } 448 449 setOperationAction(ISD::MULHS, VT, Expand); 450 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 451 setOperationAction(ISD::MULHU, VT, Expand); 452 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 453 454 setOperationAction(ISD::BSWAP, VT, Expand); 455 } 456 457 setOperationAction(ISD::ConstantFP, MVT::f32, Custom); 458 setOperationAction(ISD::ConstantFP, MVT::f64, Custom); 459 460 setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom); 461 setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom); 462 463 if (Subtarget->hasNEON()) { 464 addDRTypeForNEON(MVT::v2f32); 465 addDRTypeForNEON(MVT::v8i8); 466 addDRTypeForNEON(MVT::v4i16); 467 addDRTypeForNEON(MVT::v2i32); 468 addDRTypeForNEON(MVT::v1i64); 469 470 addQRTypeForNEON(MVT::v4f32); 471 addQRTypeForNEON(MVT::v2f64); 472 addQRTypeForNEON(MVT::v16i8); 473 addQRTypeForNEON(MVT::v8i16); 474 addQRTypeForNEON(MVT::v4i32); 475 addQRTypeForNEON(MVT::v2i64); 476 477 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but 478 // neither Neon nor VFP support any arithmetic operations on it. 
479 // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively 480 // supported for v4f32. 481 setOperationAction(ISD::FADD, MVT::v2f64, Expand); 482 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); 483 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); 484 // FIXME: Code duplication: FDIV and FREM are expanded always, see 485 // ARMTargetLowering::addTypeForNEON method for details. 486 setOperationAction(ISD::FDIV, MVT::v2f64, Expand); 487 setOperationAction(ISD::FREM, MVT::v2f64, Expand); 488 // FIXME: Create unittest. 489 // In another words, find a way when "copysign" appears in DAG with vector 490 // operands. 491 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); 492 // FIXME: Code duplication: SETCC has custom operation action, see 493 // ARMTargetLowering::addTypeForNEON method for details. 494 setOperationAction(ISD::SETCC, MVT::v2f64, Expand); 495 // FIXME: Create unittest for FNEG and for FABS. 496 setOperationAction(ISD::FNEG, MVT::v2f64, Expand); 497 setOperationAction(ISD::FABS, MVT::v2f64, Expand); 498 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); 499 setOperationAction(ISD::FSIN, MVT::v2f64, Expand); 500 setOperationAction(ISD::FCOS, MVT::v2f64, Expand); 501 setOperationAction(ISD::FPOWI, MVT::v2f64, Expand); 502 setOperationAction(ISD::FPOW, MVT::v2f64, Expand); 503 setOperationAction(ISD::FLOG, MVT::v2f64, Expand); 504 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); 505 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); 506 setOperationAction(ISD::FEXP, MVT::v2f64, Expand); 507 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); 508 // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR. 
509 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); 510 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); 511 setOperationAction(ISD::FRINT, MVT::v2f64, Expand); 512 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); 513 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); 514 setOperationAction(ISD::FMA, MVT::v2f64, Expand); 515 516 setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); 517 setOperationAction(ISD::FSIN, MVT::v4f32, Expand); 518 setOperationAction(ISD::FCOS, MVT::v4f32, Expand); 519 setOperationAction(ISD::FPOWI, MVT::v4f32, Expand); 520 setOperationAction(ISD::FPOW, MVT::v4f32, Expand); 521 setOperationAction(ISD::FLOG, MVT::v4f32, Expand); 522 setOperationAction(ISD::FLOG2, MVT::v4f32, Expand); 523 setOperationAction(ISD::FLOG10, MVT::v4f32, Expand); 524 setOperationAction(ISD::FEXP, MVT::v4f32, Expand); 525 setOperationAction(ISD::FEXP2, MVT::v4f32, Expand); 526 setOperationAction(ISD::FCEIL, MVT::v4f32, Expand); 527 setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand); 528 setOperationAction(ISD::FRINT, MVT::v4f32, Expand); 529 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand); 530 setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand); 531 532 // Mark v2f32 intrinsics. 
533 setOperationAction(ISD::FSQRT, MVT::v2f32, Expand); 534 setOperationAction(ISD::FSIN, MVT::v2f32, Expand); 535 setOperationAction(ISD::FCOS, MVT::v2f32, Expand); 536 setOperationAction(ISD::FPOWI, MVT::v2f32, Expand); 537 setOperationAction(ISD::FPOW, MVT::v2f32, Expand); 538 setOperationAction(ISD::FLOG, MVT::v2f32, Expand); 539 setOperationAction(ISD::FLOG2, MVT::v2f32, Expand); 540 setOperationAction(ISD::FLOG10, MVT::v2f32, Expand); 541 setOperationAction(ISD::FEXP, MVT::v2f32, Expand); 542 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand); 543 setOperationAction(ISD::FCEIL, MVT::v2f32, Expand); 544 setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand); 545 setOperationAction(ISD::FRINT, MVT::v2f32, Expand); 546 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand); 547 setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand); 548 549 // Neon does not support some operations on v1i64 and v2i64 types. 550 setOperationAction(ISD::MUL, MVT::v1i64, Expand); 551 // Custom handling for some quad-vector types to detect VMULL. 552 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 553 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 554 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 555 // Custom handling for some vector types to avoid expensive expansions 556 setOperationAction(ISD::SDIV, MVT::v4i16, Custom); 557 setOperationAction(ISD::SDIV, MVT::v8i8, Custom); 558 setOperationAction(ISD::UDIV, MVT::v4i16, Custom); 559 setOperationAction(ISD::UDIV, MVT::v8i8, Custom); 560 setOperationAction(ISD::SETCC, MVT::v1i64, Expand); 561 setOperationAction(ISD::SETCC, MVT::v2i64, Expand); 562 // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with 563 // a destination type that is wider than the source, and nor does 564 // it have a FP_TO_[SU]INT instruction with a narrower destination than 565 // source. 
566 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); 567 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 568 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom); 569 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom); 570 571 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 572 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); 573 574 // NEON does not have single instruction CTPOP for vectors with element 575 // types wider than 8-bits. However, custom lowering can leverage the 576 // v8i8/v16i8 vcnt instruction. 577 setOperationAction(ISD::CTPOP, MVT::v2i32, Custom); 578 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom); 579 setOperationAction(ISD::CTPOP, MVT::v4i16, Custom); 580 setOperationAction(ISD::CTPOP, MVT::v8i16, Custom); 581 setOperationAction(ISD::CTPOP, MVT::v1i64, Expand); 582 setOperationAction(ISD::CTPOP, MVT::v2i64, Expand); 583 584 setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); 585 setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); 586 587 // NEON does not have single instruction CTTZ for vectors. 
588 setOperationAction(ISD::CTTZ, MVT::v8i8, Custom); 589 setOperationAction(ISD::CTTZ, MVT::v4i16, Custom); 590 setOperationAction(ISD::CTTZ, MVT::v2i32, Custom); 591 setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); 592 593 setOperationAction(ISD::CTTZ, MVT::v16i8, Custom); 594 setOperationAction(ISD::CTTZ, MVT::v8i16, Custom); 595 setOperationAction(ISD::CTTZ, MVT::v4i32, Custom); 596 setOperationAction(ISD::CTTZ, MVT::v2i64, Custom); 597 598 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom); 599 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom); 600 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom); 601 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom); 602 603 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom); 604 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom); 605 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom); 606 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom); 607 608 // NEON only has FMA instructions as of VFP4. 609 if (!Subtarget->hasVFP4()) { 610 setOperationAction(ISD::FMA, MVT::v2f32, Expand); 611 setOperationAction(ISD::FMA, MVT::v4f32, Expand); 612 } 613 614 setTargetDAGCombine(ISD::INTRINSIC_VOID); 615 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 616 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 617 setTargetDAGCombine(ISD::SHL); 618 setTargetDAGCombine(ISD::SRL); 619 setTargetDAGCombine(ISD::SRA); 620 setTargetDAGCombine(ISD::SIGN_EXTEND); 621 setTargetDAGCombine(ISD::ZERO_EXTEND); 622 setTargetDAGCombine(ISD::ANY_EXTEND); 623 setTargetDAGCombine(ISD::BUILD_VECTOR); 624 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 625 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 626 setTargetDAGCombine(ISD::STORE); 627 setTargetDAGCombine(ISD::FP_TO_SINT); 628 setTargetDAGCombine(ISD::FP_TO_UINT); 629 setTargetDAGCombine(ISD::FDIV); 630 setTargetDAGCombine(ISD::LOAD); 631 632 // It is legal to extload from v4i8 to v4i16 or v4i32. 
633 for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, 634 MVT::v2i32}) { 635 for (MVT VT : MVT::integer_vector_valuetypes()) { 636 setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal); 637 setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal); 638 setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal); 639 } 640 } 641 } 642 643 // ARM and Thumb2 support UMLAL/SMLAL. 644 if (!Subtarget->isThumb1Only()) 645 setTargetDAGCombine(ISD::ADDC); 646 647 if (Subtarget->isFPOnlySP()) { 648 // When targeting a floating-point unit with only single-precision 649 // operations, f64 is legal for the few double-precision instructions which 650 // are present However, no double-precision operations other than moves, 651 // loads and stores are provided by the hardware. 652 setOperationAction(ISD::FADD, MVT::f64, Expand); 653 setOperationAction(ISD::FSUB, MVT::f64, Expand); 654 setOperationAction(ISD::FMUL, MVT::f64, Expand); 655 setOperationAction(ISD::FMA, MVT::f64, Expand); 656 setOperationAction(ISD::FDIV, MVT::f64, Expand); 657 setOperationAction(ISD::FREM, MVT::f64, Expand); 658 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 659 setOperationAction(ISD::FGETSIGN, MVT::f64, Expand); 660 setOperationAction(ISD::FNEG, MVT::f64, Expand); 661 setOperationAction(ISD::FABS, MVT::f64, Expand); 662 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 663 setOperationAction(ISD::FSIN, MVT::f64, Expand); 664 setOperationAction(ISD::FCOS, MVT::f64, Expand); 665 setOperationAction(ISD::FPOWI, MVT::f64, Expand); 666 setOperationAction(ISD::FPOW, MVT::f64, Expand); 667 setOperationAction(ISD::FLOG, MVT::f64, Expand); 668 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 669 setOperationAction(ISD::FLOG10, MVT::f64, Expand); 670 setOperationAction(ISD::FEXP, MVT::f64, Expand); 671 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 672 setOperationAction(ISD::FCEIL, MVT::f64, Expand); 673 setOperationAction(ISD::FTRUNC, MVT::f64, Expand); 674 setOperationAction(ISD::FRINT, MVT::f64, Expand); 
675 setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand); 676 setOperationAction(ISD::FFLOOR, MVT::f64, Expand); 677 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 678 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 679 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 680 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 681 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom); 682 setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom); 683 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 684 setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); 685 } 686 687 computeRegisterProperties(Subtarget->getRegisterInfo()); 688 689 // ARM does not have floating-point extending loads. 690 for (MVT VT : MVT::fp_valuetypes()) { 691 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); 692 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); 693 } 694 695 // ... or truncating stores 696 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 697 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 698 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 699 700 // ARM does not have i1 sign extending load. 701 for (MVT VT : MVT::integer_valuetypes()) 702 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 703 704 // ARM supports all 4 flavors of integer indexed load / store. 
705 if (!Subtarget->isThumb1Only()) { 706 for (unsigned im = (unsigned)ISD::PRE_INC; 707 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 708 setIndexedLoadAction(im, MVT::i1, Legal); 709 setIndexedLoadAction(im, MVT::i8, Legal); 710 setIndexedLoadAction(im, MVT::i16, Legal); 711 setIndexedLoadAction(im, MVT::i32, Legal); 712 setIndexedStoreAction(im, MVT::i1, Legal); 713 setIndexedStoreAction(im, MVT::i8, Legal); 714 setIndexedStoreAction(im, MVT::i16, Legal); 715 setIndexedStoreAction(im, MVT::i32, Legal); 716 } 717 } 718 719 setOperationAction(ISD::SADDO, MVT::i32, Custom); 720 setOperationAction(ISD::UADDO, MVT::i32, Custom); 721 setOperationAction(ISD::SSUBO, MVT::i32, Custom); 722 setOperationAction(ISD::USUBO, MVT::i32, Custom); 723 724 // i64 operation support. 725 setOperationAction(ISD::MUL, MVT::i64, Expand); 726 setOperationAction(ISD::MULHU, MVT::i32, Expand); 727 if (Subtarget->isThumb1Only()) { 728 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 729 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 730 } 731 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() 732 || (Subtarget->isThumb2() && !Subtarget->hasDSP())) 733 setOperationAction(ISD::MULHS, MVT::i32, Expand); 734 735 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 736 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 737 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 738 setOperationAction(ISD::SRL, MVT::i64, Custom); 739 setOperationAction(ISD::SRA, MVT::i64, Custom); 740 741 if (!Subtarget->isThumb1Only()) { 742 // FIXME: We should do this for Thumb1 as well. 743 setOperationAction(ISD::ADDC, MVT::i32, Custom); 744 setOperationAction(ISD::ADDE, MVT::i32, Custom); 745 setOperationAction(ISD::SUBC, MVT::i32, Custom); 746 setOperationAction(ISD::SUBE, MVT::i32, Custom); 747 } 748 749 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) 750 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 751 752 // ARM does not have ROTL. 
753 setOperationAction(ISD::ROTL, MVT::i32, Expand); 754 for (MVT VT : MVT::vector_valuetypes()) { 755 setOperationAction(ISD::ROTL, VT, Expand); 756 setOperationAction(ISD::ROTR, VT, Expand); 757 } 758 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 759 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 760 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) 761 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 762 763 // @llvm.readcyclecounter requires the Performance Monitors extension. 764 // Default to the 0 expansion on unsupported platforms. 765 // FIXME: Technically there are older ARM CPUs that have 766 // implementation-specific ways of obtaining this information. 767 if (Subtarget->hasPerfMon()) 768 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); 769 770 // Only ARMv6 has BSWAP. 771 if (!Subtarget->hasV6Ops()) 772 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 773 774 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide() 775 : Subtarget->hasDivideInARMMode(); 776 if (!hasDivide) { 777 // These are expanded into libcalls if the cpu doesn't have HW divider. 
778 setOperationAction(ISD::SDIV, MVT::i32, LibCall); 779 setOperationAction(ISD::UDIV, MVT::i32, LibCall); 780 } 781 782 if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) { 783 setOperationAction(ISD::SDIV, MVT::i32, Custom); 784 setOperationAction(ISD::UDIV, MVT::i32, Custom); 785 786 setOperationAction(ISD::SDIV, MVT::i64, Custom); 787 setOperationAction(ISD::UDIV, MVT::i64, Custom); 788 } 789 790 setOperationAction(ISD::SREM, MVT::i32, Expand); 791 setOperationAction(ISD::UREM, MVT::i32, Expand); 792 // Register based DivRem for AEABI (RTABI 4.2) 793 if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || 794 Subtarget->isTargetGNUAEABI()) { 795 setOperationAction(ISD::SREM, MVT::i64, Custom); 796 setOperationAction(ISD::UREM, MVT::i64, Custom); 797 798 setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod"); 799 setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod"); 800 setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod"); 801 setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod"); 802 setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod"); 803 setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod"); 804 setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod"); 805 setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod"); 806 807 setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS); 808 setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS); 809 setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS); 810 setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS); 811 setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS); 812 setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS); 813 setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS); 814 setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS); 815 816 setOperationAction(ISD::SDIVREM, MVT::i32, Custom); 817 setOperationAction(ISD::UDIVREM, MVT::i32, Custom); 818 setOperationAction(ISD::SDIVREM, MVT::i64, 
Custom); 819 setOperationAction(ISD::UDIVREM, MVT::i64, Custom); 820 } else { 821 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 822 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 823 } 824 825 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 826 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 827 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 828 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 829 830 setOperationAction(ISD::TRAP, MVT::Other, Legal); 831 832 // Use the default implementation. 833 setOperationAction(ISD::VASTART, MVT::Other, Custom); 834 setOperationAction(ISD::VAARG, MVT::Other, Expand); 835 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 836 setOperationAction(ISD::VAEND, MVT::Other, Expand); 837 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 838 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 839 840 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment()) 841 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 842 else 843 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 844 845 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 846 // the default expansion. 847 InsertFencesForAtomic = false; 848 if (Subtarget->hasAnyDataBarrier() && 849 (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { 850 // ATOMIC_FENCE needs custom lowering; the others should have been expanded 851 // to ldrex/strex loops already. 852 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 853 if (!Subtarget->isThumb() || !Subtarget->isMClass()) 854 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 855 856 // On v8, we have particularly efficient implementations of atomic fences 857 // if they can be combined with nearby atomic loads and stores. 858 if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) { 859 // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. 
860 InsertFencesForAtomic = true; 861 } 862 } else { 863 // If there's anything we can use as a barrier, go through custom lowering 864 // for ATOMIC_FENCE. 865 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, 866 Subtarget->hasAnyDataBarrier() ? Custom : Expand); 867 868 // Set them all for expansion, which will force libcalls. 869 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 870 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 871 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 872 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 873 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 874 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 875 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 876 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 877 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); 878 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); 879 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); 880 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); 881 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the 882 // Unordered/Monotonic case. 883 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 884 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 885 } 886 887 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 888 889 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 890 if (!Subtarget->hasV6Ops()) { 891 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 892 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 893 } 894 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 895 896 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && 897 !Subtarget->isThumb1Only()) { 898 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 899 // iff target supports vfp2. 
900 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 901 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 902 } 903 904 // We want to custom lower some of our intrinsics. 905 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 906 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 907 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 908 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); 909 if (Subtarget->useSjLjEH()) 910 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); 911 912 setOperationAction(ISD::SETCC, MVT::i32, Expand); 913 setOperationAction(ISD::SETCC, MVT::f32, Expand); 914 setOperationAction(ISD::SETCC, MVT::f64, Expand); 915 setOperationAction(ISD::SELECT, MVT::i32, Custom); 916 setOperationAction(ISD::SELECT, MVT::f32, Custom); 917 setOperationAction(ISD::SELECT, MVT::f64, Custom); 918 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 919 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 920 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 921 922 // Thumb-1 cannot currently select ARMISD::SUBE. 
923 if (!Subtarget->isThumb1Only()) 924 setOperationAction(ISD::SETCCE, MVT::i32, Custom); 925 926 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 927 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 928 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 929 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 930 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 931 932 // We don't support sin/cos/fmod/copysign/pow 933 setOperationAction(ISD::FSIN, MVT::f64, Expand); 934 setOperationAction(ISD::FSIN, MVT::f32, Expand); 935 setOperationAction(ISD::FCOS, MVT::f32, Expand); 936 setOperationAction(ISD::FCOS, MVT::f64, Expand); 937 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 938 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 939 setOperationAction(ISD::FREM, MVT::f64, Expand); 940 setOperationAction(ISD::FREM, MVT::f32, Expand); 941 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && 942 !Subtarget->isThumb1Only()) { 943 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 944 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 945 } 946 setOperationAction(ISD::FPOW, MVT::f64, Expand); 947 setOperationAction(ISD::FPOW, MVT::f32, Expand); 948 949 if (!Subtarget->hasVFP4()) { 950 setOperationAction(ISD::FMA, MVT::f64, Expand); 951 setOperationAction(ISD::FMA, MVT::f32, Expand); 952 } 953 954 // Various VFP goodness 955 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { 956 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. 957 if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) { 958 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 959 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); 960 } 961 962 // fp16 is a special v7 extension that adds f16 <-> f32 conversions. 963 if (!Subtarget->hasFP16()) { 964 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); 965 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); 966 } 967 } 968 969 // Combine sin / cos into one node or libcall if possible. 
970 if (Subtarget->hasSinCos()) { 971 setLibcallName(RTLIB::SINCOS_F32, "sincosf"); 972 setLibcallName(RTLIB::SINCOS_F64, "sincos"); 973 if (Subtarget->isTargetWatchABI()) { 974 setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP); 975 setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP); 976 } 977 if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) { 978 // For iOS, we don't want to the normal expansion of a libcall to 979 // sincos. We want to issue a libcall to __sincos_stret. 980 setOperationAction(ISD::FSINCOS, MVT::f64, Custom); 981 setOperationAction(ISD::FSINCOS, MVT::f32, Custom); 982 } 983 } 984 985 // FP-ARMv8 implements a lot of rounding-like FP operations. 986 if (Subtarget->hasFPARMv8()) { 987 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 988 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 989 setOperationAction(ISD::FROUND, MVT::f32, Legal); 990 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 991 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 992 setOperationAction(ISD::FRINT, MVT::f32, Legal); 993 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 994 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 995 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); 996 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); 997 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); 998 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); 999 1000 if (!Subtarget->isFPOnlySP()) { 1001 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 1002 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 1003 setOperationAction(ISD::FROUND, MVT::f64, Legal); 1004 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 1005 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 1006 setOperationAction(ISD::FRINT, MVT::f64, Legal); 1007 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 1008 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 1009 } 1010 } 1011 1012 if (Subtarget->hasNEON()) { 1013 // vmin and vmax aren't available in a 
scalar form, so we use 1014 // a NEON instruction with an undef lane instead. 1015 setOperationAction(ISD::FMINNAN, MVT::f32, Legal); 1016 setOperationAction(ISD::FMAXNAN, MVT::f32, Legal); 1017 setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal); 1018 setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal); 1019 setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal); 1020 setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal); 1021 } 1022 1023 // We have target-specific dag combine patterns for the following nodes: 1024 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 1025 setTargetDAGCombine(ISD::ADD); 1026 setTargetDAGCombine(ISD::SUB); 1027 setTargetDAGCombine(ISD::MUL); 1028 setTargetDAGCombine(ISD::AND); 1029 setTargetDAGCombine(ISD::OR); 1030 setTargetDAGCombine(ISD::XOR); 1031 1032 if (Subtarget->hasV6Ops()) 1033 setTargetDAGCombine(ISD::SRL); 1034 1035 setStackPointerRegisterToSaveRestore(ARM::SP); 1036 1037 if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || 1038 !Subtarget->hasVFP2()) 1039 setSchedulingPreference(Sched::RegPressure); 1040 else 1041 setSchedulingPreference(Sched::Hybrid); 1042 1043 //// temporary - rewrite interface to use type 1044 MaxStoresPerMemset = 8; 1045 MaxStoresPerMemsetOptSize = 4; 1046 MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores 1047 MaxStoresPerMemcpyOptSize = 2; 1048 MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores 1049 MaxStoresPerMemmoveOptSize = 2; 1050 1051 // On ARM arguments smaller than 4 bytes are extended, so all arguments 1052 // are at least 4 bytes aligned. 1053 setMinStackArgumentAlignment(4); 1054 1055 // Prefer likely predicted branches to selects on out-of-order cores. 1056 PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); 1057 1058 setMinFunctionAlignment(Subtarget->isThumb() ? 
1 : 2); 1059 } 1060 1061 bool ARMTargetLowering::useSoftFloat() const { 1062 return Subtarget->useSoftFloat(); 1063 } 1064 1065 // FIXME: It might make sense to define the representative register class as the 1066 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is 1067 // a super-register of SPR, and DPR is a superset if DPR_VFP2. Consequently, 1068 // SPR's representative would be DPR_VFP2. This should work well if register 1069 // pressure tracking were modified such that a register use would increment the 1070 // pressure of the register class's representative and all of it's super 1071 // classes' representatives transitively. We have not implemented this because 1072 // of the difficulty prior to coalescing of modeling operand register classes 1073 // due to the common occurrence of cross class copies and subregister insertions 1074 // and extractions. 1075 std::pair<const TargetRegisterClass *, uint8_t> 1076 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, 1077 MVT VT) const { 1078 const TargetRegisterClass *RRC = nullptr; 1079 uint8_t Cost = 1; 1080 switch (VT.SimpleTy) { 1081 default: 1082 return TargetLowering::findRepresentativeClass(TRI, VT); 1083 // Use DPR as representative register class for all floating point 1084 // and vector types. Since there are 32 SPR registers and 32 DPR registers so 1085 // the cost is 1 for both f32 and f64. 1086 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16: 1087 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32: 1088 RRC = &ARM::DPRRegClass; 1089 // When NEON is used for SP, only half of the register file is available 1090 // because operations that define both SP and DP results will be constrained 1091 // to the VFP2 class (D0-D15). We currently model this constraint prior to 1092 // coalescing by double-counting the SP regs. See the FIXME above. 
1093 if (Subtarget->useNEONForSinglePrecisionFP()) 1094 Cost = 2; 1095 break; 1096 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1097 case MVT::v4f32: case MVT::v2f64: 1098 RRC = &ARM::DPRRegClass; 1099 Cost = 2; 1100 break; 1101 case MVT::v4i64: 1102 RRC = &ARM::DPRRegClass; 1103 Cost = 4; 1104 break; 1105 case MVT::v8i64: 1106 RRC = &ARM::DPRRegClass; 1107 Cost = 8; 1108 break; 1109 } 1110 return std::make_pair(RRC, Cost); 1111 } 1112 1113 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { 1114 switch ((ARMISD::NodeType)Opcode) { 1115 case ARMISD::FIRST_NUMBER: break; 1116 case ARMISD::Wrapper: return "ARMISD::Wrapper"; 1117 case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC"; 1118 case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; 1119 case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL"; 1120 case ARMISD::CALL: return "ARMISD::CALL"; 1121 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED"; 1122 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK"; 1123 case ARMISD::BRCOND: return "ARMISD::BRCOND"; 1124 case ARMISD::BR_JT: return "ARMISD::BR_JT"; 1125 case ARMISD::BR2_JT: return "ARMISD::BR2_JT"; 1126 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG"; 1127 case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG"; 1128 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD"; 1129 case ARMISD::CMP: return "ARMISD::CMP"; 1130 case ARMISD::CMN: return "ARMISD::CMN"; 1131 case ARMISD::CMPZ: return "ARMISD::CMPZ"; 1132 case ARMISD::CMPFP: return "ARMISD::CMPFP"; 1133 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0"; 1134 case ARMISD::BCC_i64: return "ARMISD::BCC_i64"; 1135 case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; 1136 1137 case ARMISD::CMOV: return "ARMISD::CMOV"; 1138 1139 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG"; 1140 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG"; 1141 case ARMISD::RRX: return "ARMISD::RRX"; 1142 1143 case ARMISD::ADDC: return "ARMISD::ADDC"; 1144 case ARMISD::ADDE: return 
"ARMISD::ADDE"; 1145 case ARMISD::SUBC: return "ARMISD::SUBC"; 1146 case ARMISD::SUBE: return "ARMISD::SUBE"; 1147 1148 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD"; 1149 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR"; 1150 1151 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP"; 1152 case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP"; 1153 case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH"; 1154 1155 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN"; 1156 1157 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER"; 1158 1159 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC"; 1160 1161 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR"; 1162 1163 case ARMISD::PRELOAD: return "ARMISD::PRELOAD"; 1164 1165 case ARMISD::WIN__CHKSTK: return "ARMISD:::WIN__CHKSTK"; 1166 case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK"; 1167 1168 case ARMISD::VCEQ: return "ARMISD::VCEQ"; 1169 case ARMISD::VCEQZ: return "ARMISD::VCEQZ"; 1170 case ARMISD::VCGE: return "ARMISD::VCGE"; 1171 case ARMISD::VCGEZ: return "ARMISD::VCGEZ"; 1172 case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; 1173 case ARMISD::VCGEU: return "ARMISD::VCGEU"; 1174 case ARMISD::VCGT: return "ARMISD::VCGT"; 1175 case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; 1176 case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; 1177 case ARMISD::VCGTU: return "ARMISD::VCGTU"; 1178 case ARMISD::VTST: return "ARMISD::VTST"; 1179 1180 case ARMISD::VSHL: return "ARMISD::VSHL"; 1181 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 1182 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 1183 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 1184 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 1185 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 1186 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 1187 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 1188 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 1189 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 1190 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 
1191 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 1192 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 1193 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 1194 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 1195 case ARMISD::VSLI: return "ARMISD::VSLI"; 1196 case ARMISD::VSRI: return "ARMISD::VSRI"; 1197 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 1198 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 1199 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 1200 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 1201 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; 1202 case ARMISD::VDUP: return "ARMISD::VDUP"; 1203 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 1204 case ARMISD::VEXT: return "ARMISD::VEXT"; 1205 case ARMISD::VREV64: return "ARMISD::VREV64"; 1206 case ARMISD::VREV32: return "ARMISD::VREV32"; 1207 case ARMISD::VREV16: return "ARMISD::VREV16"; 1208 case ARMISD::VZIP: return "ARMISD::VZIP"; 1209 case ARMISD::VUZP: return "ARMISD::VUZP"; 1210 case ARMISD::VTRN: return "ARMISD::VTRN"; 1211 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 1212 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 1213 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 1214 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 1215 case ARMISD::UMLAL: return "ARMISD::UMLAL"; 1216 case ARMISD::SMLAL: return "ARMISD::SMLAL"; 1217 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 1218 case ARMISD::BFI: return "ARMISD::BFI"; 1219 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 1220 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 1221 case ARMISD::VBSL: return "ARMISD::VBSL"; 1222 case ARMISD::MEMCPY: return "ARMISD::MEMCPY"; 1223 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 1224 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 1225 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 1226 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 1227 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 1228 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 1229 case ARMISD::VLD4_UPD: 
return "ARMISD::VLD4_UPD"; 1230 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 1231 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 1232 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 1233 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 1234 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 1235 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 1236 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 1237 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 1238 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 1239 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 1240 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 1241 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 1242 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 1243 } 1244 return nullptr; 1245 } 1246 1247 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, 1248 EVT VT) const { 1249 if (!VT.isVector()) 1250 return getPointerTy(DL); 1251 return VT.changeVectorElementTypeToInteger(); 1252 } 1253 1254 /// getRegClassFor - Return the register class that should be used for the 1255 /// specified value type. 1256 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const { 1257 // Map v4i64 to QQ registers but do not make the type legal. Similarly map 1258 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to 1259 // load / store 4 to 8 consecutive D registers. 1260 if (Subtarget->hasNEON()) { 1261 if (VT == MVT::v4i64) 1262 return &ARM::QQPRRegClass; 1263 if (VT == MVT::v8i64) 1264 return &ARM::QQQQPRRegClass; 1265 } 1266 return TargetLowering::getRegClassFor(VT); 1267 } 1268 1269 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the 1270 // source/dest is aligned and the copy size is large enough. We therefore want 1271 // to align such objects passed to memory intrinsics. 
1272 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, 1273 unsigned &PrefAlign) const { 1274 if (!isa<MemIntrinsic>(CI)) 1275 return false; 1276 MinSize = 8; 1277 // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 1278 // cycle faster than 4-byte aligned LDM. 1279 PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4); 1280 return true; 1281 } 1282 1283 // Create a fast isel object. 1284 FastISel * 1285 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 1286 const TargetLibraryInfo *libInfo) const { 1287 return ARM::createFastISel(funcInfo, libInfo); 1288 } 1289 1290 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { 1291 unsigned NumVals = N->getNumValues(); 1292 if (!NumVals) 1293 return Sched::RegPressure; 1294 1295 for (unsigned i = 0; i != NumVals; ++i) { 1296 EVT VT = N->getValueType(i); 1297 if (VT == MVT::Glue || VT == MVT::Other) 1298 continue; 1299 if (VT.isFloatingPoint() || VT.isVector()) 1300 return Sched::ILP; 1301 } 1302 1303 if (!N->isMachineOpcode()) 1304 return Sched::RegPressure; 1305 1306 // Load are scheduled for latency even if there instruction itinerary 1307 // is not available. 
1308 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1309 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); 1310 1311 if (MCID.getNumDefs() == 0) 1312 return Sched::RegPressure; 1313 if (!Itins->isEmpty() && 1314 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) 1315 return Sched::ILP; 1316 1317 return Sched::RegPressure; 1318 } 1319 1320 //===----------------------------------------------------------------------===// 1321 // Lowering Code 1322 //===----------------------------------------------------------------------===// 1323 1324 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC 1325 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { 1326 switch (CC) { 1327 default: llvm_unreachable("Unknown condition code!"); 1328 case ISD::SETNE: return ARMCC::NE; 1329 case ISD::SETEQ: return ARMCC::EQ; 1330 case ISD::SETGT: return ARMCC::GT; 1331 case ISD::SETGE: return ARMCC::GE; 1332 case ISD::SETLT: return ARMCC::LT; 1333 case ISD::SETLE: return ARMCC::LE; 1334 case ISD::SETUGT: return ARMCC::HI; 1335 case ISD::SETUGE: return ARMCC::HS; 1336 case ISD::SETULT: return ARMCC::LO; 1337 case ISD::SETULE: return ARMCC::LS; 1338 } 1339 } 1340 1341 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. 
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  // Default the second predicate to AL ("always"): most conditions need only
  // one ARM predicate; the two-predicate mappings below overwrite this.
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  // Ordered-not-equal needs two checks: "less than" (MI) or "greater" (GT).
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  // Ordered / unordered map onto the V flag (VC / VS).
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  // Unordered-or-equal needs two checks: equal (EQ) or unordered (VS).
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                 Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// getEffectiveCallingConv - Get the effective calling convention, taking into
/// account presence of floating point hardware and calling convention
/// limitations, such as support for variadic functions.
CallingConv::ID
ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
                                           bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::GHC:
    // Already an ARM-specific convention; use it as-is.
    return CC;
  case CallingConv::PreserveMost:
    return CallingConv::PreserveMost;
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::Swift:
    // Hard-float (VFP-register) argument passing is not used for variadic
    // functions; fall back to base AAPCS in that case.
    return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
  case CallingConv::C:
    // Plain C maps to the target ABI: APCS on old-ABI targets, otherwise
    // AAPCS, upgraded to AAPCS-VFP when hard-float is available and the
    // call is not variadic.
    if (!Subtarget->isAAPCS_ABI())
      return CallingConv::ARM_APCS;
    else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  case CallingConv::Fast:
  case CallingConv::CXX_FAST_TLS:
    // "Fast" only has a custom ABI on APCS targets with VFP2; otherwise it
    // degrades to the regular target convention.
    if (!Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
        return CallingConv::Fast;
      return CallingConv::ARM_APCS;
    } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention. \p Return selects the return-value assignment function
/// rather than the argument one.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::Fast:
    return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  case CallingConv::PreserveMost:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals,
                                   bool isThisReturn, SDValue ThisVal) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    // Deliberate by-value copy: VA is overwritten below (VA = RVLocs[++i])
    // when a value is split across several locations.
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64: the value arrives as two i32 registers
      // and is reassembled with VMOVDRR. Each CopyFromReg threads the Chain
      // and glue (InFlag) so the copies stay ordered.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      // On big-endian targets the two halves arrive in the opposite order.
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        // A v2f64 result is two f64 halves: insert the first into lane 0,
        // then pull two more i32 registers for the second half (lane 1).
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      // Simple case: the whole value lives in a single register.
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      // The value was passed bit-cast; cast it back to the expected type.
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
/// The slot is addressed as SP + the location's stack offset; the resulting
/// store node is returned so the caller can collect it into MemOpChains.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
      false, false, 0);
}

/// PassF64ArgInRegs - Split an f64 argument into two i32 halves (VMOVRRD)
/// and pass them via \p VA / \p NextVA. The first half always goes in a
/// register; the second goes in a register or, if NextVA is a memory
/// location, is stored to the outgoing-argument stack area. On big-endian
/// targets the half order is reversed (see 'id' below). \p StackPtr is
/// materialized lazily from SP on first use.
void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  // Result index of the half that goes in the first location: 0 on
  // little-endian, 1 on big-endian.
  unsigned id = Subtarget->isLittle() ? 0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
/// Handles byval aggregates, f64/v2f64 register-pair splitting, 'returned'
/// ('this'-return) arguments, long calls via the constant pool, and
/// sibling-call (tail call) emission as ARMISD::TC_RETURN.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isSibCall = false;
  auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
    isTailCall = false;

  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                 isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
                                                   Outs, OutVals, Ins, DAG);
    if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      isSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (isSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!isSibCall)
    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getIntPtrConstant(NumBytes, dl, true), dl);

  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  // Note: 'i' indexes ArgLocs (one per location), while 'realArgIdx' indexes
  // Outs/OutVals (one per IR argument); they diverge when a value occupies
  // several locations (f64/v2f64), which is why the custom cases below bump
  // 'i' with ++i.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, dl, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, dl, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      // A first i32 argument flagged 'returned' is the 'this'-return case;
      // remember it so LowerCallResult can forward the value directly.
      if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        EVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        // Load one word per assigned register from the byval source.
        // NB: this 'i' intentionally shadows the outer ArgLocs index for
        // the duration of the loop.
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false,
                                     DAG.InferPtrAlignment(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If parameter size outsides register area, "offset" value
        // helps us to calculate stack slot for remained part properly.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      // Copy whatever part of the byval aggregate did not fit in registers
      // to its stack slot with a struct-copy pseudo.
      if (Flags.getByValSize() > 4*offset) {
        auto PtrVT = getPointerTy(DAG.getDataLayout());
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
        SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
                                           MVT::i32);
        SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
                                            MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops));
      }
    } else if (!isSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  auto PtrVt = getPointerTy(DAG.getDataLayout());

  if (Subtarget->genLongCalls()) {
    assert((Subtarget->isTargetWindows() ||
            getTargetMachine().getRelocationModel() == Reloc::Static) &&
           "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
          false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
          false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isDef = GV->isStrongDefinitionForLinker();
    const TargetMachine &TM = getTargetMachine();
    Reloc::Model RM = TM.getRelocationModel();
    const Triple &TargetTriple = TM.getTargetTriple();
    bool isStub =
        !shouldAssumeDSOLocal(RM, TargetTriple, *GV->getParent(), GV) &&
        Subtarget->isTargetMachO();

    isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
    // tBX takes a register source operand.
    if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
      Callee = DAG.getNode(
          ARMISD::WrapperPIC, dl, PtrVt,
          DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
      Callee = DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), Callee,
                           MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                           false, false, true, 0);
    } else if (Subtarget->isTargetCOFF()) {
      assert(Subtarget->isTargetWindows() &&
             "Windows is the only supported COFF target");
      unsigned TargetFlags = GV->hasDLLImportStorageClass()
                                 ? ARMII::MO_DLLIMPORT
                                 : ARMII::MO_NO_FLAG;
      Callee =
          DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0, TargetFlags);
      // dllimport'ed symbols are addresses of pointers; load through the IAT.
      if (GV->hasDLLImportStorageClass())
        Callee =
            DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
                        DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
                        MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                        false, false, false, 0);
    } else {
      // On ELF targets for PIC code, direct calls should go through the PLT
      unsigned OpFlags = 0;
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, OpFlags);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetMachO() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      // Materialize the symbol via a PC-relative constant-pool load plus
      // PIC_ADD so the address ends up in a register.
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
          false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
    } else {
      unsigned OpFlags = 0;
      // On ELF targets for PIC code, direct calls should go through the PLT
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, OpFlags);
    }
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = ARMISD::CALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
             // Emit regular call when code size is the priority
             !MF.getFunction()->optForMinSize())
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo()->setHasTailCall();
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to insure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    unsigned Align) const {
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");

  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Align = std::max(Align, 4U);

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  // Skip (waste) registers until the register number satisfies the byval's
  // alignment requirement (alignment expressed in 4-byte register units).
  unsigned AlignInRegs = Align / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  // Number of bytes still available in the GPR argument registers [Reg, R4).
  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and parameter size greater than size of
  // all remained GPR regs. In that case we can't split parameter, we must
  // send it to stack. We also must set NCRN to R4, so waste all
  // remained registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // First register for byval parameter is the first register that wasn't
  // allocated before this method call, so it would be "reg".
  // If parameter is small enough to be saved in range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs,
  // else parameter would be splitted between registers and stack,
  // end register would be r4 in this case.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note, first register is allocated in the beginning of function already,
  // allocate remained amount of registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack. Used during sibcall eligibility checking to
/// prove a stack argument needs no copy.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  // Sentinel; must be overwritten on any path that reaches the final check.
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      // The value must come straight off a stack slot load.
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  // Only fixed objects (incoming arguments) can match the caller's slots.
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();

  assert(Subtarget->supportsTailCall());

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Do not sibcall optimize vararg calls unless the call site is not passing
  // any arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF->hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Check that the call results are passed in the same way.
  LLVMContext &C = *DAG.getContext();
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  CCAssignFnForNode(CalleeCC, true, isVarArg),
                                  CCAssignFnForNode(CallerCC, true, isVarArg)))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // If Caller's vararg or byval argument has been split between registers and
  // stack, do not perform tail call, since part of the argument is in caller's
  // local frame.
  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call);
    CCInfo.AnalyzeCallOperands(Outs,
                               CCAssignFnForNode(CalleeCC, false, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      // As in LowerCall: 'i' walks locations, 'realArgIdx' walks IR
      // arguments; custom (split) locations advance 'i' extra steps.
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations. The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  return true;
}

/// CanLowerReturn - Check, via CCState::CheckReturn, that every value in
/// \p Outs can be assigned a return location by the convention's
/// return-value assignment function.
bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
                                                    isVarArg));
}

/// Emit an ARMISD::INTRET_FLAG return for a function carrying the
/// "interrupt" attribute, inserting the kind-specific LR offset as an
/// extra operand.
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    SDLoc DL, SelectionDAG &DAG) {
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *F = MF.getFunction();

  StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();

  // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
  // version of the "preferred return address". These offsets affect the return
  // instruction if this is a return from PL1 without hypervisor extensions.
  //   IRQ/FIQ: +4     "subs pc, lr, #4"
  //   SWI:     0      "subs pc, lr, #0"
  //   ABORT:   +4     "subs pc, lr, #4"
  //   UNDEF:   +4/+2  "subs pc, lr, #0"
  //   UNDEF varies depending on where the exception came from ARM or Thumb
  //   mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.

  // Translate the interrupt kind into the LR adjustment described above;
  // any other attribute value is a hard error.
  int64_t LROffset;
  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
      IntKind == "ABORT")
    LROffset = 4;
  else if (IntKind == "SWI" || IntKind == "UNDEF")
    LROffset = 0;
  else
    report_fatal_error("Unsupported interrupt attribute. If present, value "
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  // Insert the LR adjustment as operand #1, immediately after the chain.
  RetOps.insert(RetOps.begin() + 1,
                DAG.getConstant(LROffset, DL, MVT::i32, false));

  return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}

SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext(), Call);

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
                                               isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  bool isLittleEndian = Subtarget->isLittle();

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  AFI->setReturnRegsCount(RVLocs.size());

  // Copy the result values into the output registers.
  // realRVLocIdx indexes OutVals; i indexes RVLocs. They advance together,
  // but i is bumped extra times below when one value spans several locs.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, dl, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        // Endianness decides which 32-bit half goes in the lower-numbered reg.
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 0 : 1),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 1 : 0),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, dl, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 0 : 1),
                               Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 1 : 0),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
  // If some callee-saved registers are returned via copies (swifterror-style
  // CSRsViaCopy), list them as implicit return operands so they stay live.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (ARM::GPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i32));
      else if (ARM::DPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
}

/// isUsedByReturnOnly - Return true if the value produced by N is consumed
/// only by a return (possibly via the CopyToReg / VMOVRRD / BITCAST shapes a
/// return value takes after lowering). On success, Chain is updated to the
/// chain feeding those copies so the caller can fold a tail call.
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    // The two CopyToRegs are chained one after the other; find which one is
    // first (its chain operand is not the other copy) and take its chain.
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of the chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
2439 if (!Copy->hasOneUse()) 2440 return false; 2441 Copy = *Copy->use_begin(); 2442 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2443 return false; 2444 // If the copy has a glue operand, we conservatively assume it isn't safe to 2445 // perform a tail call. 2446 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2447 return false; 2448 TCChain = Copy->getOperand(0); 2449 } else { 2450 return false; 2451 } 2452 2453 bool HasRet = false; 2454 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2455 UI != UE; ++UI) { 2456 if (UI->getOpcode() != ARMISD::RET_FLAG && 2457 UI->getOpcode() != ARMISD::INTRET_FLAG) 2458 return false; 2459 HasRet = true; 2460 } 2461 2462 if (!HasRet) 2463 return false; 2464 2465 Chain = TCChain; 2466 return true; 2467 } 2468 2469 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2470 if (!Subtarget->supportsTailCall()) 2471 return false; 2472 2473 auto Attr = 2474 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); 2475 if (!CI->isTailCall() || Attr.getValueAsString() == "true") 2476 return false; 2477 2478 return true; 2479 } 2480 2481 // Trying to write a 64 bit value so need to split into two 32 bit values first, 2482 // and pass the lower and high parts through. 2483 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { 2484 SDLoc DL(Op); 2485 SDValue WriteValue = Op->getOperand(2); 2486 2487 // This function is only supposed to be called for i64 type argument. 
  assert(WriteValue.getValueType() == MVT::i64
          && "LowerWRITE_REGISTER called for non-i64 type argument.");

  // Split the i64 into (lo, hi) i32 halves and re-emit WRITE_REGISTER with
  // both halves as separate operands.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(1, DL, MVT::i32));
  SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
  return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

/// getJumpTableEncoding - ARM emits jump tables inline in the text section.
unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

/// LowerBlockAddress - Materialize a blockaddress via a constant-pool load;
/// under PIC a pc-relative fixup (ARMISD::PIC_ADD) is applied to the loaded
/// value.
SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    // PC bias: reading PC yields the fetch address plus 8 (ARM) or 4 (Thumb).
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
                                      ARMCP::CPBlockAddress, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result =
      DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  false, false, false, 0);
  if (RelocM == Reloc::Static)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}

/// \brief Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address for Darwin, and return an
/// SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
  SDLoc DL(Op);

  // First step is to get the address of the actual global symbol. This is
  // where the TLS descriptor lives.
  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet =
      DAG.getLoad(MVT::i32, DL, Chain, DescAddr,
                  MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                  false, true, true, 4);
  Chain = FuncTLVGet.getValue(1);

  MachineFunction &F = DAG.getMachineFunction();
  MachineFrameInfo *MFI = F.getFrameInfo();
  MFI->setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
  // silly).
  auto TRI =
      getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
  auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
  const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());

  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
2613 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); 2614 Chain = 2615 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), 2616 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), 2617 DAG.getRegisterMask(Mask), Chain.getValue(1)); 2618 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); 2619 } 2620 2621 SDValue 2622 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, 2623 SelectionDAG &DAG) const { 2624 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); 2625 2626 SDValue Chain = DAG.getEntryNode(); 2627 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2628 SDLoc DL(Op); 2629 2630 // Load the current TEB (thread environment block) 2631 SDValue Ops[] = {Chain, 2632 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), 2633 DAG.getConstant(15, DL, MVT::i32), 2634 DAG.getConstant(0, DL, MVT::i32), 2635 DAG.getConstant(13, DL, MVT::i32), 2636 DAG.getConstant(0, DL, MVT::i32), 2637 DAG.getConstant(2, DL, MVT::i32)}; 2638 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 2639 DAG.getVTList(MVT::i32, MVT::Other), Ops); 2640 2641 SDValue TEB = CurrentTEB.getValue(0); 2642 Chain = CurrentTEB.getValue(1); 2643 2644 // Load the ThreadLocalStoragePointer from the TEB 2645 // A pointer to the TLS array is located at offset 0x2c from the TEB. 2646 SDValue TLSArray = 2647 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); 2648 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo(), 2649 false, false, false, 0); 2650 2651 // The pointer to the thread's TLS data area is at the TLS Index scaled by 4 2652 // offset into the TLSArray. 
  // Load the TLS index from the C runtime
  SDValue TLSIndex =
      DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
  TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
  TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo(),
                         false, false, false, 0);

  // Scale the index by 4 (shl 2) to get the byte offset into the TLS array.
  SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
                             DAG.getConstant(2, DL, MVT::i32));
  SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
                            DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
                            MachinePointerInfo(), false, false, false, 0);

  // Get the offset of the start of the .tls section (section base)
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
  SDValue Offset =
      DAG.getLoad(PtrVT, DL, Chain,
                  DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
                              DAG.getTargetConstantPool(CPV, PtrVT, 4)),
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  false, false, false, 0);

  return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model
// (pc-relative TLSGD constant-pool entry + call to __tls_get_addr).
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(GA);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // PC bias: reading PC yields the fetch address plus 8 (ARM) or 4 (Thumb).
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV =
    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  false, false, false, 0);
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);

  // FIXME: is there useful debug info available here?
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
               DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args),
               0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
        false, false, 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    // Second load: dereference the GOT entry to obtain the tp-relative offset.
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
        false, false, 0);
  } else {
    // local exec model: the tp-relative offset (TPOFF) is a link-time
    // constant, loaded directly from the constant pool.
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
        false, false, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

/// LowerGlobalTLSAddress - Dispatch TLS lowering by target OS, then (for ELF)
/// by the TLS model chosen for the global.
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget->isTargetDarwin())
    return LowerGlobalTLSAddressDarwin(Op, DAG);

  if (Subtarget->isTargetWindows())
    return LowerGlobalTLSAddressWindows(Op, DAG);

  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() && "Only ELF implemented here");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().Options.EmulatedTLS)
    return LowerToTLSEmulatedModel(GA, DAG);

  TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());

  switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModels(GA, DAG, model);
  }
  llvm_unreachable("bogus TLS model");
}

/// LowerGlobalAddressELF - Materialize a global's address for ELF targets:
/// PIC uses a pc-relative constant-pool entry (GOT_PREL when not DSO-local);
/// static uses movw/movt when available, else a constant-pool load.
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const TargetMachine &TM = getTargetMachine();
  Reloc::Model RM = TM.getRelocationModel();
  const Triple &TargetTriple = TM.getTargetTriple();
  if (RM == Reloc::PIC_) {
    // Non-DSO-local globals must go through the GOT (GOT_PREL).
    bool UseGOT_PREL =
        !shouldAssumeDSOLocal(RM, TargetTriple, *GV->getParent(), GV);

    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDLoc dl(Op);
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
        GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
        UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
        /*AddCurrentAddress=*/UseGOT_PREL);
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
        false, false, 0);
    SDValue Chain = Result.getValue(1);
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    if (UseGOT_PREL)
      // Dereference the GOT entry to get the actual global address.
      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
                           MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                           false, false, false, 0);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via movt/movw
  // pair. This is always cheaper.
  if (Subtarget->useMovt(DAG.getMachineFunction())) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    // No movw/movt: fall back to loading the address from the constant pool.
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
        false, false, 0);
  }
}

/// LowerGlobalAddressDarwin - Materialize a global's address on Darwin via a
/// (PIC or plain) wrapper around the non-lazy symbol, with an extra GOT load
/// when the global is accessed through an indirect symbol.
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();

  if (Subtarget->useMovt(DAG.getMachineFunction()))
    ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into multiple nodes
  unsigned Wrapper =
      RelocM == Reloc::PIC_ ? ARMISD::WrapperPIC : ARMISD::Wrapper;

  SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
  SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                         false, false, false, 0);
  return Result;
}

/// LowerGlobalAddressWindows - Windows/COFF global addresses are always
/// materialized with movw/movt; dllimport symbols get a further load through
/// the import table.
SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
                                                     SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
  assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
         "Windows on ARM expects to use movw/movt");

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const ARMII::TOF TargetFlags =
    (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  SDLoc DL(Op);

  ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes.
  Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
                       DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
                                                  TargetFlags));
  if (GV->hasDLLImportStorageClass())
    // dllimport: the movw/movt pair yields the IAT slot; load through it.
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()),
                         false, false, false, 0);
  return Result;
}

/// LowerEH_SJLJ_SETJMP - Lower the SjLj EH setjmp intrinsic to the target
/// node (produces the i32 setjmp result plus a chain).
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Val = DAG.getConstant(0, dl, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

/// LowerEH_SJLJ_LONGJMP - Lower the SjLj EH longjmp intrinsic.
SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
}

/// LowerEH_SJLJ_SETUP_DISPATCH - Lower the SjLj dispatch-setup intrinsic.
SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
                     Op.getOperand(0));
}

/// LowerINTRINSIC_WO_CHAIN - Custom-lower the handful of chainless ARM
/// intrinsics that map directly onto generic or ARMISD DAG nodes; everything
/// else is left for pattern selection.
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_rbit: {
    assert(Op.getOperand(1).getValueType() == MVT::i32 &&
           "RBIT intrinsic must have i32 type!");
    return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1));
  }
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    // Materialize the address of this function's LSDA (language-specific
    // data area) via a constant-pool entry, pc-relative under PIC.
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
    SDValue CPAddr;
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), false,
        false, false, 0);

    if (RelocM == Reloc::PIC_) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminnm:
  case Intrinsic::arm_neon_vmaxnm: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
      ? ISD::FMINNUM : ISD::FMAXNUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminu:
  case Intrinsic::arm_neon_vmaxu: {
    if (Op.getValueType().isFloatingPoint())
      return SDValue();
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
      ? ISD::UMIN : ISD::UMAX;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vmins:
  case Intrinsic::arm_neon_vmaxs: {
    // v{min,max}s is overloaded between signed integers and floats.
    if (!Op.getValueType().isFloatingPoint()) {
      unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
        ? ISD::SMIN : ISD::SMAX;
      return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
    }
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
      ? ISD::FMINNAN : ISD::FMAXNAN;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  }
}

/// LowerATOMIC_FENCE - Lower an atomic fence to a DMB (or the legacy MCR
/// barrier on ARMv6 cores without DMB), picking the narrowest barrier domain
/// the subtarget allows.
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  // FIXME: handle "fence singlethread" more efficiently.
  SDLoc dl(Op);
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, dl, MVT::i32));
  }

  ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
  AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
  ARM_MB::MemBOpt Domain = ARM_MB::ISH;
  if (Subtarget->isMClass()) {
    // Only a full system barrier exists in the M-class architectures.
    Domain = ARM_MB::SY;
  } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) {
    // Swift happens to implement ISHST barriers in a way that's compatible with
    // Release semantics but weaker than ISH so we'd be fools not to use
    // it. Beware: other processors probably don't!
    Domain = ARM_MB::ISHST;
  }

  return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
                     DAG.getConstant(Domain, dl, MVT::i32));
}

/// LowerPREFETCH - Lower ISD::PREFETCH to ARMISD::PRELOAD (PLD/PLDW/PLI) when
/// the subtarget has preload instructions; otherwise drop the prefetch and
/// just preserve the chain.
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  // ARM pre v5TE and Thumb1 does not have preload instructions.
  if (!(Subtarget->isThumb2() ||
        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc dl(Op);
  // Operand 2 is the rw flag: 0 = read, 1 = write; invert and mask to get
  // "isRead".
  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // ARMv7 with MP extension has PLDW.
    return Op.getOperand(0);

  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (Subtarget->isThumb()) {
    // Invert the bits.
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }

  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
                     DAG.getConstant(isData, dl, MVT::i32));
}

/// LowerVASTART - Lower va_start by storing the address of the
/// VarArgsFrameIndex slot into the va_list memory operand.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV), false, false, 0);
}

/// GetF64FormalArgument - Reassemble an f64 formal argument that the calling
/// convention split across two i32 locations: VA is the first (register)
/// half, NextVA the second half (register or stack). The halves are combined
/// with ARMISD::VMOVDRR, swapped for big-endian.
SDValue
ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                        SDValue &Root, SelectionDAG &DAG,
                                        SDLoc dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    ArgValue2 = DAG.getLoad(
        MVT::i32, dl, Root, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
        false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }
  if (!Subtarget->isLittle())
    std::swap (ArgValue, ArgValue2);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval). Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
int
ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                  SDLoc dl, SDValue &Chain,
                                  const Value *OrigArg,
                                  unsigned InRegsParamRecordIdx,
                                  int ArgOffset,
                                  unsigned ArgSize) const {
  // Currently, two use-cases possible:
  // Case #1. Non-var-args function, and we meet first byval parameter.
  //          Setup first unallocated register as first byval register;
  //          eat all remained registers
  //          (these two actions are performed by HandleByVal method).
  //          Then, here, we initialize stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function, that doesn't contain byval parameters.
  //          The same: eat all remained unallocated registers,
  //          initialize stack frame.
3153 3154 MachineFunction &MF = DAG.getMachineFunction(); 3155 MachineFrameInfo *MFI = MF.getFrameInfo(); 3156 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3157 unsigned RBegin, REnd; 3158 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { 3159 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); 3160 } else { 3161 unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); 3162 RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; 3163 REnd = ARM::R4; 3164 } 3165 3166 if (REnd != RBegin) 3167 ArgOffset = -4 * (ARM::R4 - RBegin); 3168 3169 auto PtrVT = getPointerTy(DAG.getDataLayout()); 3170 int FrameIndex = MFI->CreateFixedObject(ArgSize, ArgOffset, false); 3171 SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); 3172 3173 SmallVector<SDValue, 4> MemOps; 3174 const TargetRegisterClass *RC = 3175 AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 3176 3177 for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { 3178 unsigned VReg = MF.addLiveIn(Reg, RC); 3179 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3180 SDValue Store = 3181 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3182 MachinePointerInfo(OrigArg, 4 * i), false, false, 0); 3183 MemOps.push_back(Store); 3184 FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); 3185 } 3186 3187 if (!MemOps.empty()) 3188 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3189 return FrameIndex; 3190 } 3191 3192 // Setup stack frame, the va_list pointer will start from. 
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                        SDLoc dl, SDValue &Chain,
                                        unsigned ArgOffset,
                                        unsigned TotalArgRegsSaveSize,
                                        bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Try to store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing
  // the result of va_next.
  // If there is no regs to be stored, just point address after last
  // argument passed via stack.
  int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
                                  CCInfo.getInRegsParamsCount(),
                                  CCInfo.getNextStackOffset(), 4);
  AFI->setVarArgsFrameIndex(FrameIndex);
}

/// LowerFormalArguments - Lower the incoming (formal) arguments: assign each
/// one a register or stack location via the calling convention, then emit the
/// CopyFromReg / load nodes that materialize them, handling split f64/v2f64
/// values, byval aggregates, and the varargs register-save area.
SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        SDLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet byval parameter.
  // We also increase this value in case of varargs function.
  AFI->setArgRegsSaveSize(0);

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  unsigned ArgRegBegin = ARM::R4;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
      break;

    CCValAssign &VA = ArgLocs[i];
    unsigned Index = VA.getValNo();
    ISD::ArgFlagsTy Flags = Ins[Index].Flags;
    if (!Flags.isByVal())
      continue;

    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
    ArgRegBegin = std::min(ArgRegBegin, RBegin);

    CCInfo.nextInRegsParam();
  }
  // The pre-scan above consumed the in-regs records; rewind so the main loop
  // below can walk them again.
  CCInfo.rewindByValRegsInfo();

  int lastInsIndex = -1;
  if (isVarArg && MFI->hasVAStart()) {
    unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    if (RegIdx != array_lengthof(GPRArgRegs))
      ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
  }

  unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
  AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[VA.getValNo()].isOrigArg()) {
      std::advance(CurOrigArg,
                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
    }
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        if (VA.getLocVT() == MVT::v2f64) {
          // A v2f64 occupies up to four ArgLocs entries; note the ++i below
          // advances the outer loop past the extra entries.
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2;
          if (VA.isMemLoc()) {
            int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
            SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
            ArgValue2 = DAG.getLoad(
                MVT::f64, dl, Chain, FIN,
                MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
                false, false, false, 0);
          } else {
            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                             Chain, DAG, dl);
          }
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1,
                                 DAG.getIntPtrConstant(0, dl));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2,
                                 DAG.getIntPtrConstant(1, dl));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);

      } else {
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);

    } else { // VA.isRegLoc()

      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = VA.getValNo();

      // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.
      if (index != lastInsIndex)
        {
          ISD::ArgFlagsTy Flags = Ins[index].Flags;
          // FIXME: For now, all byval parameter objects are marked mutable.
          // This can be changed with more analysis.
          // In case of tail call optimization mark all arguments mutable.
          // Since they could be overwritten by lowering of arguments in case of
          // a tail call.
          if (Flags.isByVal()) {
            assert(Ins[index].isOrigArg() &&
                   "Byval arguments cannot be implicit");
            unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

            int FrameIndex = StoreByValRegs(
                CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
                VA.getLocMemOffset(), Flags.getByValSize());
            InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
            CCInfo.nextInRegsParam();
          } else {
            unsigned FIOffset = VA.getLocMemOffset();
            int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                            FIOffset, true);

            // Create load nodes to retrieve arguments from the stack.
            SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
            InVals.push_back(DAG.getLoad(
                VA.getValVT(), dl, Chain, FIN,
                MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
                false, false, false, 0));
          }
          lastInsIndex = index;
        }
    }
  }

  // varargs
  if (isVarArg && MFI->hasVAStart())
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
                         CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  } else if (Op->getOpcode() == ISD::BITCAST &&
             Op->getValueType(0) == MVT::f64) {
    // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
    // created by LowerConstantFP().
    SDValue BitcastOp = Op->getOperand(0);
    if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
        isNullConstant(BitcastOp->getOperand(0)))
      return true;
  }
  return false;
}

/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands. May adjust an out-of-range RHS immediate by one (with a
/// compensating condition-code change) so it fits the cmp encoding; the chosen
/// ARM condition code is returned through ARMcc.
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                             SDValue &ARMcc, SelectionDAG &DAG,
                             SDLoc dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        // Guard against wrapping past the signed minimum.
        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C != 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        // Guard against wrapping past the signed maximum.
        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      }
    }
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}

/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue
ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                             SDLoc dl) const {
  assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
  SDValue Cmp;
  // Compare against +0.0 uses the single-operand CMPFPw0 form.
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
  // FMSTAT transfers the VFP status flags to CPSR.
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}

/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
3518 SDValue 3519 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 3520 unsigned Opc = Cmp.getOpcode(); 3521 SDLoc DL(Cmp); 3522 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 3523 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3524 3525 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 3526 Cmp = Cmp.getOperand(0); 3527 Opc = Cmp.getOpcode(); 3528 if (Opc == ARMISD::CMPFP) 3529 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3530 else { 3531 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 3532 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 3533 } 3534 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 3535 } 3536 3537 std::pair<SDValue, SDValue> 3538 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, 3539 SDValue &ARMcc) const { 3540 assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); 3541 3542 SDValue Value, OverflowCmp; 3543 SDValue LHS = Op.getOperand(0); 3544 SDValue RHS = Op.getOperand(1); 3545 SDLoc dl(Op); 3546 3547 // FIXME: We are currently always generating CMPs because we don't support 3548 // generating CMN through the backend. This is not as good as the natural 3549 // CMP case because it causes a register dependency and cannot be folded 3550 // later. 
3551 3552 switch (Op.getOpcode()) { 3553 default: 3554 llvm_unreachable("Unknown overflow instruction!"); 3555 case ISD::SADDO: 3556 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3557 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3558 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3559 break; 3560 case ISD::UADDO: 3561 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3562 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3563 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3564 break; 3565 case ISD::SSUBO: 3566 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3567 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3568 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3569 break; 3570 case ISD::USUBO: 3571 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3572 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3573 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3574 break; 3575 } // switch (...) 3576 3577 return std::make_pair(Value, OverflowCmp); 3578 } 3579 3580 3581 SDValue 3582 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 3583 // Let legalize expand this if it isn't a legal type yet. 3584 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 3585 return SDValue(); 3586 3587 SDValue Value, OverflowCmp; 3588 SDValue ARMcc; 3589 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); 3590 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3591 SDLoc dl(Op); 3592 // We use 0 and 1 as false and true values. 
3593 SDValue TVal = DAG.getConstant(1, dl, MVT::i32); 3594 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); 3595 EVT VT = Op.getValueType(); 3596 3597 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, 3598 ARMcc, CCR, OverflowCmp); 3599 3600 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 3601 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 3602 } 3603 3604 3605 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3606 SDValue Cond = Op.getOperand(0); 3607 SDValue SelectTrue = Op.getOperand(1); 3608 SDValue SelectFalse = Op.getOperand(2); 3609 SDLoc dl(Op); 3610 unsigned Opc = Cond.getOpcode(); 3611 3612 if (Cond.getResNo() == 1 && 3613 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 3614 Opc == ISD::USUBO)) { 3615 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) 3616 return SDValue(); 3617 3618 SDValue Value, OverflowCmp; 3619 SDValue ARMcc; 3620 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); 3621 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3622 EVT VT = Op.getValueType(); 3623 3624 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, 3625 OverflowCmp, DAG); 3626 } 3627 3628 // Convert: 3629 // 3630 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 3631 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 3632 // 3633 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 3634 const ConstantSDNode *CMOVTrue = 3635 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 3636 const ConstantSDNode *CMOVFalse = 3637 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 3638 3639 if (CMOVTrue && CMOVFalse) { 3640 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 3641 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 3642 3643 SDValue True; 3644 SDValue False; 3645 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 3646 True = SelectTrue; 3647 False = SelectFalse; 3648 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 3649 True = SelectFalse; 
3650 False = SelectTrue; 3651 } 3652 3653 if (True.getNode() && False.getNode()) { 3654 EVT VT = Op.getValueType(); 3655 SDValue ARMcc = Cond.getOperand(2); 3656 SDValue CCR = Cond.getOperand(3); 3657 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 3658 assert(True.getValueType() == VT); 3659 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); 3660 } 3661 } 3662 } 3663 3664 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 3665 // undefined bits before doing a full-word comparison with zero. 3666 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 3667 DAG.getConstant(1, dl, Cond.getValueType())); 3668 3669 return DAG.getSelectCC(dl, Cond, 3670 DAG.getConstant(0, dl, Cond.getValueType()), 3671 SelectTrue, SelectFalse, ISD::SETNE); 3672 } 3673 3674 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 3675 bool &swpCmpOps, bool &swpVselOps) { 3676 // Start by selecting the GE condition code for opcodes that return true for 3677 // 'equality' 3678 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || 3679 CC == ISD::SETULE) 3680 CondCode = ARMCC::GE; 3681 3682 // and GT for opcodes that return false for 'equality'. 3683 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || 3684 CC == ISD::SETULT) 3685 CondCode = ARMCC::GT; 3686 3687 // Since we are constrained to GE/GT, if the opcode contains 'less', we need 3688 // to swap the compare operands. 3689 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || 3690 CC == ISD::SETULT) 3691 swpCmpOps = true; 3692 3693 // Both GT and GE are ordered comparisons, and return false for 'unordered'. 3694 // If we have an unordered opcode, we need to swap the operands to the VSEL 3695 // instruction (effectively negating the condition). 3696 // 3697 // This also has the effect of swapping which one of 'less' or 'greater' 3698 // returns true, so we also swap the compare operands. 
It also switches 3699 // whether we return true for 'equality', so we compensate by picking the 3700 // opposite condition code to our original choice. 3701 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || 3702 CC == ISD::SETUGT) { 3703 swpCmpOps = !swpCmpOps; 3704 swpVselOps = !swpVselOps; 3705 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; 3706 } 3707 3708 // 'ordered' is 'anything but unordered', so use the VS condition code and 3709 // swap the VSEL operands. 3710 if (CC == ISD::SETO) { 3711 CondCode = ARMCC::VS; 3712 swpVselOps = true; 3713 } 3714 3715 // 'unordered or not equal' is 'anything but equal', so use the EQ condition 3716 // code and swap the VSEL operands. 3717 if (CC == ISD::SETUNE) { 3718 CondCode = ARMCC::EQ; 3719 swpVselOps = true; 3720 } 3721 } 3722 3723 SDValue ARMTargetLowering::getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, 3724 SDValue TrueVal, SDValue ARMcc, SDValue CCR, 3725 SDValue Cmp, SelectionDAG &DAG) const { 3726 if (Subtarget->isFPOnlySP() && VT == MVT::f64) { 3727 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, 3728 DAG.getVTList(MVT::i32, MVT::i32), FalseVal); 3729 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, 3730 DAG.getVTList(MVT::i32, MVT::i32), TrueVal); 3731 3732 SDValue TrueLow = TrueVal.getValue(0); 3733 SDValue TrueHigh = TrueVal.getValue(1); 3734 SDValue FalseLow = FalseVal.getValue(0); 3735 SDValue FalseHigh = FalseVal.getValue(1); 3736 3737 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, 3738 ARMcc, CCR, Cmp); 3739 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, 3740 ARMcc, CCR, duplicateCmp(Cmp, DAG)); 3741 3742 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); 3743 } else { 3744 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, 3745 Cmp); 3746 } 3747 } 3748 3749 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 3750 EVT VT = Op.getValueType(); 3751 SDValue LHS = Op.getOperand(0); 
3752 SDValue RHS = Op.getOperand(1); 3753 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 3754 SDValue TrueVal = Op.getOperand(2); 3755 SDValue FalseVal = Op.getOperand(3); 3756 SDLoc dl(Op); 3757 3758 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { 3759 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, 3760 dl); 3761 3762 // If softenSetCCOperands only returned one value, we should compare it to 3763 // zero. 3764 if (!RHS.getNode()) { 3765 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 3766 CC = ISD::SETNE; 3767 } 3768 } 3769 3770 if (LHS.getValueType() == MVT::i32) { 3771 // Try to generate VSEL on ARMv8. 3772 // The VSEL instruction can't use all the usual ARM condition 3773 // codes: it only has two bits to select the condition code, so it's 3774 // constrained to use only GE, GT, VS and EQ. 3775 // 3776 // To implement all the various ISD::SETXXX opcodes, we sometimes need to 3777 // swap the operands of the previous compare instruction (effectively 3778 // inverting the compare condition, swapping 'less' and 'greater') and 3779 // sometimes need to swap the operands to the VSEL (which inverts the 3780 // condition in the sense of firing whenever the previous condition didn't) 3781 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 3782 TrueVal.getValueType() == MVT::f64)) { 3783 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3784 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || 3785 CondCode == ARMCC::VC || CondCode == ARMCC::NE) { 3786 CC = ISD::getSetCCInverse(CC, true); 3787 std::swap(TrueVal, FalseVal); 3788 } 3789 } 3790 3791 SDValue ARMcc; 3792 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3793 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3794 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 3795 } 3796 3797 ARMCC::CondCodes CondCode, CondCode2; 3798 FPCCToARMCC(CC, CondCode, CondCode2); 3799 3800 // Try to generate VMAXNM/VMINNM on 
ARMv8. 3801 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 3802 TrueVal.getValueType() == MVT::f64)) { 3803 bool swpCmpOps = false; 3804 bool swpVselOps = false; 3805 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); 3806 3807 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || 3808 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { 3809 if (swpCmpOps) 3810 std::swap(LHS, RHS); 3811 if (swpVselOps) 3812 std::swap(TrueVal, FalseVal); 3813 } 3814 } 3815 3816 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3817 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3818 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3819 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 3820 if (CondCode2 != ARMCC::AL) { 3821 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); 3822 // FIXME: Needs another CMP because flag can have but one use. 3823 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 3824 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); 3825 } 3826 return Result; 3827 } 3828 3829 /// canChangeToInt - Given the fp compare operand, return true if it is suitable 3830 /// to morph to an integer compare sequence. 3831 static bool canChangeToInt(SDValue Op, bool &SeenZero, 3832 const ARMSubtarget *Subtarget) { 3833 SDNode *N = Op.getNode(); 3834 if (!N->hasOneUse()) 3835 // Otherwise it requires moving the value from fp to integer registers. 3836 return false; 3837 if (!N->getNumValues()) 3838 return false; 3839 EVT VT = Op.getValueType(); 3840 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 3841 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 3842 // vmrs are very slow, e.g. cortex-a8. 
3843 return false; 3844 3845 if (isFloatingPointZero(Op)) { 3846 SeenZero = true; 3847 return true; 3848 } 3849 return ISD::isNormalLoad(N); 3850 } 3851 3852 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 3853 if (isFloatingPointZero(Op)) 3854 return DAG.getConstant(0, SDLoc(Op), MVT::i32); 3855 3856 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 3857 return DAG.getLoad(MVT::i32, SDLoc(Op), 3858 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 3859 Ld->isVolatile(), Ld->isNonTemporal(), 3860 Ld->isInvariant(), Ld->getAlignment()); 3861 3862 llvm_unreachable("Unknown VFP cmp argument!"); 3863 } 3864 3865 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 3866 SDValue &RetVal1, SDValue &RetVal2) { 3867 SDLoc dl(Op); 3868 3869 if (isFloatingPointZero(Op)) { 3870 RetVal1 = DAG.getConstant(0, dl, MVT::i32); 3871 RetVal2 = DAG.getConstant(0, dl, MVT::i32); 3872 return; 3873 } 3874 3875 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 3876 SDValue Ptr = Ld->getBasePtr(); 3877 RetVal1 = DAG.getLoad(MVT::i32, dl, 3878 Ld->getChain(), Ptr, 3879 Ld->getPointerInfo(), 3880 Ld->isVolatile(), Ld->isNonTemporal(), 3881 Ld->isInvariant(), Ld->getAlignment()); 3882 3883 EVT PtrType = Ptr.getValueType(); 3884 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 3885 SDValue NewPtr = DAG.getNode(ISD::ADD, dl, 3886 PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); 3887 RetVal2 = DAG.getLoad(MVT::i32, dl, 3888 Ld->getChain(), NewPtr, 3889 Ld->getPointerInfo().getWithOffset(4), 3890 Ld->isVolatile(), Ld->isNonTemporal(), 3891 Ld->isInvariant(), NewAlign); 3892 return; 3893 } 3894 3895 llvm_unreachable("Unknown VFP cmp argument!"); 3896 } 3897 3898 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 3899 /// f32 and even f64 comparisons to integer ones. 
3900 SDValue 3901 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 3902 SDValue Chain = Op.getOperand(0); 3903 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3904 SDValue LHS = Op.getOperand(2); 3905 SDValue RHS = Op.getOperand(3); 3906 SDValue Dest = Op.getOperand(4); 3907 SDLoc dl(Op); 3908 3909 bool LHSSeenZero = false; 3910 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 3911 bool RHSSeenZero = false; 3912 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 3913 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 3914 // If unsafe fp math optimization is enabled and there are no other uses of 3915 // the CMP operands, and the condition code is EQ or NE, we can optimize it 3916 // to an integer comparison. 3917 if (CC == ISD::SETOEQ) 3918 CC = ISD::SETEQ; 3919 else if (CC == ISD::SETUNE) 3920 CC = ISD::SETNE; 3921 3922 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); 3923 SDValue ARMcc; 3924 if (LHS.getValueType() == MVT::f32) { 3925 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3926 bitcastf32Toi32(LHS, DAG), Mask); 3927 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3928 bitcastf32Toi32(RHS, DAG), Mask); 3929 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3930 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3931 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3932 Chain, Dest, ARMcc, CCR, Cmp); 3933 } 3934 3935 SDValue LHS1, LHS2; 3936 SDValue RHS1, RHS2; 3937 expandf64Toi32(LHS, DAG, LHS1, LHS2); 3938 expandf64Toi32(RHS, DAG, RHS1, RHS2); 3939 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 3940 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 3941 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3942 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3943 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3944 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 3945 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); 3946 } 3947 3948 return SDValue(); 
}

/// Lower an ISD::BR_CC node (conditional branch on a comparison) into the
/// ARM-specific ARMISD::BRCOND form, which branches on condition flags in
/// CPSR produced by an explicit compare node.
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  // Single-precision-only FPUs cannot compare f64 directly: soften the
  // comparison into a libcall whose (integer) result is compared instead.
  if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
                                                    dl);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Integer comparison: a single CMP plus one conditional branch suffices.
  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);

  // Under unsafe-FP-math, (in)equality branches may be simplified further.
  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
      return Result;
  }

  // Some FP condition codes map to two ARM predicates (CondCode2 != AL); in
  // that case a second conditional branch is chained after the first,
  // reusing its glued flags.
  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    // Chain after the first branch and reuse its glue (the flags).
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}

/// Lower ISD::BR_JT (branch through a jump table) for ARM/Thumb.
SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PTy = getPointerTy(DAG.getDataLayout());
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
  // Table entries are 4 bytes wide: scale the index accordingly.
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
  if (Subtarget->isThumb2()) {
    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later.
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI);
  }
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    // PIC: the entry is an offset relative to the table base, so load it and
    // add the table address back in before branching.
    Addr =
        DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()),
                    false, false, false, 0);
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  } else {
    // Non-PIC: the loaded entry is the destination address itself.
    Addr =
        DAG.getLoad(PTy, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()),
                    false, false, false, 0);
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  }
}

/// Custom-lower vector FP_TO_SINT/FP_TO_UINT. v4f32->v4i16 is handled via an
/// intermediate v4i32 conversion plus a truncate; anything that is not a
/// direct f32->i32 element conversion is scalarized.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getValueType().getVectorElementType() == MVT::i32) {
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
      return Op;
    return
DAG.UnrollVectorOp(Op.getNode());
  }

  assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
         "Invalid type for custom lowering!");
  if (VT != MVT::v4i16)
    return DAG.UnrollVectorOp(Op.getNode());

  // Convert to v4i32 first, then truncate each lane down to i16.
  Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
}

/// Lower FP_TO_SINT/FP_TO_UINT. Vector cases are delegated to
/// LowerVectorFP_TO_INT; a scalar f64 source on an FP-only-SP subtarget is
/// turned into the matching RTLIB libcall.
SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorFP_TO_INT(Op, DAG);
  if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
                       /*isSigned*/ false, SDLoc(Op)).first;
  }

  return Op;
}

/// Custom-lower vector SINT_TO_FP/UINT_TO_FP. v4i16->v4f32 is handled by
/// (sign/zero-)extending to v4i32 first; anything that is not a direct
/// i32->f32 element conversion is scalarized.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
    if (VT.getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
         "Invalid type for custom lowering!");
  if (VT != MVT::v4f32)
    return DAG.UnrollVectorOp(Op.getNode());

  // Widen the i16 lanes with the extension matching the conversion's
  // signedness, then convert from v4i32.
  unsigned CastOpc;
  unsigned Opc;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode!");
  case ISD::SINT_TO_FP:
    CastOpc = ISD::SIGN_EXTEND;
    Opc = ISD::SINT_TO_FP;
    break;
  case ISD::UINT_TO_FP:
    CastOpc = ISD::ZERO_EXTEND;
    Opc = ISD::UINT_TO_FP;
    break;
  }

  Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

/// Lower SINT_TO_FP/UINT_TO_FP. Vector cases are delegated to
/// LowerVectorINT_TO_FP; an f64 result on an FP-only-SP subtarget is turned
/// into the matching RTLIB libcall.
SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorINT_TO_FP(Op, DAG);
  if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::SINT_TO_FP)
      LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
                       /*isSigned*/ false, SDLoc(Op)).first;
  }

  return Op;
}

SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);
  SDValue Tmp1 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  // Values that already come from GPRs (bitcast / VMOVDRR inputs) are
  // handled with the integer sequence further below rather than on NEON.
  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
               Tmp0.getOpcode() == ARMISD::VMOVDRR;
  bool UseNEON = !InGPR && Subtarget->hasNEON();

  if (UseNEON) {
    // Use VBSL to copy the sign bit.
    // createNEONModImm(0x6, 0x80) encodes the v2i32 splat 0x80000000
    // (0x80 << 24), i.e. the f32 sign bit in each lane.
    unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
                               DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
    EVT OpVT = (VT == MVT::f32) ?
MVT::v2i32 : MVT::v1i64;
    // For an f64 destination, shift the f32 sign mask up into the high half
    // of the 64-bit lane; for f32, widen the scalar into a v2f32 lane.
    if (VT == MVT::f64)
      Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                         DAG.getConstant(32, dl, MVT::i32));
    else /*if (VT == MVT::f32)*/
      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
    // Line up the sign-providing operand's sign bit with the destination's.
    if (SrcVT == MVT::f32) {
      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
      if (VT == MVT::f64)
        Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                           DAG.getConstant(32, dl, MVT::i32));
    } else if (VT == MVT::f32)
      Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                         DAG.getConstant(32, dl, MVT::i32));
    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);

    // Build ~Mask by XORing against an all-ones vector (modimm 0xe/0xff
    // splats 0xff into every byte).
    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
                                            dl, MVT::i32);
    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));

    // Bitwise select: sign bit from Tmp1, all remaining bits from Tmp0.
    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
    if (VT == MVT::f32) {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                        DAG.getConstant(0, dl, MVT::i32));
    } else {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
    }

    return Res;
  }

  // Bitcast operand 1 to i32.
  if (SrcVT == MVT::f64)
    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                       Tmp1).getValue(1);
  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);

  // Or in the signbit with integer operations.
  SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
  SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
  if (VT == MVT::f32) {
    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
  }

  // f64: Or the high part with signbit and then combine two parts.
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}

/// Lower ISD::RETURNADDR. Depth 0 returns LR (marked as an implicit
/// live-in); deeper frames load the saved return address at frame + 4.
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    // Assumes the caller's return address is spilled at frame pointer + 4.
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

/// Lower ISD::FRAMEADDR: start from the frame register and chase Depth
/// saved-frame-pointer links with loads.
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARMBaseRegisterInfo &ARI =
      *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = ARI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  // Only the stack pointer is currently addressable by name; any other name
  // is a hard error.
  unsigned Reg = StringSwitch<unsigned>(RegName)
                     .Case("sp", ARM::SP)
                     .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error(Twine("Invalid register name \""
                           + StringRef(RegName) + "\"."));
}

// Result is 64 bit value so split into two 32 bit values and return as a
// pair of values.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {
  SDLoc DL(N);

  // This function is only supposed to be called for i64 type destination.
assert(N->getValueType(0) == MVT::i64
         && "ExpandREAD_REGISTER called for non-i64 type result.");

  // Re-issue the READ_REGISTER as two i32 results plus a chain.
  SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
                             DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
                             N->getOperand(0),
                             N->getOperand(1));

  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
                                Read.getValue(1)));
  // NOTE(review): Read.getOperand(0) is the *incoming* chain; the new read's
  // own chain result would be Read.getValue(2) — confirm this is intended.
  Results.push_back(Read.getOperand(0));
}

/// \p BC is a bitcast that is about to be turned into a VMOVDRR.
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \p return The node that would replace \p BT, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {
  SDValue Op = BC->getOperand(0);
  EVT DstVT = BC->getValueType(0);

  // The only vector instruction that can produce a scalar (remember,
  // since the bitcast was about to be turned into VMOVDRR, the source
  // type is i64) from a vector is EXTRACT_VECTOR_ELT.
  // Moreover, we can do this combine only if there is one use.
  // Finally, if the destination type is not a vector, there is not
  // much point on forcing everything on the vector bank.
  if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !Op.hasOneUse())
    return SDValue();

  // If the index is not constant, we will introduce an additional
  // multiply that will stick.
  // Give up in that case.
  ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!Index)
    return SDValue();
  unsigned DstNumElt = DstVT.getVectorNumElements();

  // Compute the new index: each i64 element of the source covers DstNumElt
  // elements of the destination type, so scale the old index by that factor.
  const APInt &APIntIndex = Index->getAPIntValue();
  APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
  NewIndex *= APIntIndex;
  // Check if the new constant index fits into i32.
  if (NewIndex.getBitWidth() > 32)
    return SDValue();

  // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
  // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
  SDLoc dl(Op);
  SDValue ExtractSrc = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(
      *DAG.getContext(), DstVT.getScalarType(),
      ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
  SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
                     DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i64 types, either as the
  // source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
         "ExpandBITCAST called for non-i64 type");

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
    // if we can combine the bitcast with its source.
    if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
      return Val;

    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, dl, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt;
    // Big-endian multi-element vectors are lane-reversed (VREV64) first so
    // the two i32 halves come out in the right order.
    if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
        SrcVT.getVectorNumElements() > 1)
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32),
                        DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
    else
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32), Op);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction. However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ?
MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  // For ShAmt < VTBits, Lo is (Lo >> ShAmt) | (Hi << (VTBits - ShAmt)); for
  // ShAmt >= VTBits it is Hi shifted by (ShAmt - VTBits). Compute both and
  // select with a CMOV on the sign of ExtraShAmt = ShAmt - VTBits.
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  // Mirror image of LowerShiftRightParts: the high word is
  // (Hi << ShAmt) | (Lo >> (VTBits - ShAmt)) for ShAmt < VTBits, and
  // Lo << (ShAmt - VTBits) otherwise; select via CMOV on ExtraShAmt >= 0.
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                          ISD::SETGE, ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// Lower FLT_ROUNDS_ by reading the rounding-mode bits out of FPSCR.
SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
  // so that the shift + and get folded into a bitfield extract.
  SDLoc dl(Op);
  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                              DAG.getConstant(Intrinsic::arm_get_fpscr, dl,
                                              MVT::i32));
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                     DAG.getConstant(3, dl, MVT::i32));
}

/// Lower CTTZ/CTTZ_ZERO_UNDEF. Vector cases use NEON bit tricks built
/// around isolating the least significant set bit; the scalar case uses
/// ctlz(bitreverse(x)) on v6t2 and later.
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector()) {
    assert(ST->hasNEON());

    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)

    // Since we can only compute the number of bits in a byte with vcnt.8, we
    // have to gather the result with pairwise addition (vpaddl) for i16, i32,
    // and i64.
// Compute LSB - 1.
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load constant 0xffff'ffff'ffff'ffff to register.
      // (NEON modified-immediate 0x1eff encodes an all-ones 64-bit lane,
      // so adding it computes LSB + (-1) == LSB - 1.)
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }

    // Count #bits with vcnt.8.
    EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
    SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits);
    SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8);

    // Gather the #bits with vpaddl (pairwise add.)
    EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
    SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit,
        DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
        Cnt8);
    if (ElemTy == MVT::i16)
      return Cnt16;

    EVT VT32Bit = VT.is64BitVector() ? MVT::v2i32 : MVT::v4i32;
    SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit,
        DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
        Cnt16);
    if (ElemTy == MVT::i32)
      return Cnt32;

    assert(ElemTy == MVT::i64);
    SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
        DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
        Cnt32);
    return Cnt64;
  }

  if (!ST->hasV6T2Ops())
    return SDValue();

  // Scalar: cttz(x) = ctlz(bitreverse(x)).
  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

/// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
/// for each 16-bit element from operand, repeated. The basic idea is to
/// leverage vcnt to get the 8-bit counts, gather and add the results.
///
/// Trace for v4i16:
/// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
/// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
/// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
/// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
///            [b0 b1 b2 b3 b4 b5 b6 b7]
///           +[b1 b0 b3 b2 b5 b4 b7 b6]
/// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
/// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]            each ki is 8-bits)
static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
  SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
  SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
  SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
  return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
}

/// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
/// bit-count for each 16-bit element from the operand. We need slightly
/// different sequencing for v4i16 and v8i16 to stay within NEON's available
/// 64/128-bit registers.
///
/// Trace for v4i16:
/// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
/// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
/// v8i16:Extended  = [k0    k1    k2    k3    k0    k1    k2    k3    ]
/// v4i16:Extracted = [k0    k1    k2    k3    ]
static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
  if (VT.is64BitVector()) {
    // 64-bit result: widen the 8-bit counts to v8i16, then keep the low half.
    SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
                       DAG.getIntPtrConstant(0, DL));
  } else {
    // 128-bit result: keep the low half of the counts first, then widen.
    SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
                                    BitCounts, DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
  }
}

/// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
/// bit-count for each 32-bit element from the operand. The idea here is
/// to split the vector into 16-bit elements, leverage the 16-bit count
/// routine, and then combine the results.
///
/// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
/// input     = [v0    v1    ] (vi: 32-bit elements)
/// Bitcast   = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
/// Counts16  = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
/// vrev: N0  = [k1 k0 k3 k2 ]
///             [k0 k1 k2 k3 ]
///       N1 =+[k1 k0 k3 k2 ]
///             [k0 k2 k1 k3 ]
///       N2 =+[k1 k3 k0 k2 ]
///             [k0 k2 k1 k3 ]
///  Extended =+[k1 k3 k0 k2 ]
///             [k0 k2       ]
///  Extracted=+[k1 k3       ]
///
static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  EVT VT16Bit = VT.is64BitVector() ?
MVT::v4i16 : MVT::v8i16;

  SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
  SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
  // Pairwise-combine adjacent 16-bit counts (VREV32 + ADD + VUZP) so the
  // per-32-bit-element sums land in the low lanes.
  SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
  SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
  SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);

  if (VT.is64BitVector()) {
    SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
                       DAG.getIntPtrConstant(0, DL));
  } else {
    SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
                                    DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
  }
}

/// Custom-lower CTPOP for the NEON vector types listed in the asserts,
/// dispatching to the 16-bit or 32-bit element routine.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
          VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  if (VT.getVectorElementType() == MVT::i32)
    return lowerCTPOP32BitElements(N, DAG);
  else
    return lowerCTPOP16BitElements(N, DAG);
}

/// Lower vector SHL/SRA/SRL to the NEON vshift intrinsics. Right shifts are
/// expressed as left shifts by a negated shift amount.
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  if (!VT.isVector())
    return SDValue();

  // Lower vector shifts on NEON to use VSHL.
  assert(ST->hasNEON() && "unexpected vector shift");

  // Left shifts translate directly to the vshiftu intrinsic.
  if (N->getOpcode() == ISD::SHL)
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
                                       MVT::i32),
                       N->getOperand(0), N->getOperand(1));

  assert((N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

  // NEON uses the same intrinsics for both left and right shifts. For
  // right shifts, the shift amounts are negative, so negate the vector of
  // shift amounts.
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                     getZeroVector(ShiftVT, DAG, dl),
                                     N->getOperand(1));
  // Signed vs. unsigned intrinsic selects arithmetic vs. logical shift.
  Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                             Intrinsic::arm_neon_vshifts :
                             Intrinsic::arm_neon_vshiftu);
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                     DAG.getConstant(vshiftInt, dl, MVT::i32),
                     N->getOperand(0), NegatedCount);
}

/// Expand a 64-bit SRA/SRL by exactly one into a flag-setting shift of the
/// high word plus an RRX that rotates the captured carry into the low word.
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isOneConstant(N->getOperand(1)))
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, dl, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

/// Lower a vector SETCC to ARM NEON compare nodes (VCEQ/VCGT/VCGE/...),
/// swapping operands and/or inverting the result where NEON has no direct
/// form for the condition code.
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  if (CmpVT.getVectorElementType() == MVT::i64)
    // 64-bit comparisons are not legal. We've marked SETCC as non-Custom,
    // but it's possible that our operands are 64-bit but our result is 32-bit.
    // Bail in this case.
    return SDValue();

  if (Op1.getValueType().isFloatingPoint()) {
    // Map each FP condition code onto the comparisons NEON actually has
    // (VCEQ/VCGT/VCGE), using operand swaps and result inversion for the
    // remaining predicates.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison");
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT: Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE: Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison");
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true; // Fallthrough
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true; // Fallthrough
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true; // Fallthrough
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {

      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
        // We matched against equality-with-zero, but VTST tests for any set
        // bit, so the pending inversion must be flipped.
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  // If one of the operands is a constant vector zero, attempt to fold the
  // comparison to a specialized compare-against-zero form.
  SDValue SingleOp;
  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
    SingleOp = Op0;
  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    // Zero on the left: 0 >= x is x <= 0, 0 > x is x < 0.
    if (Opc == ARMISD::VCGE)
      Opc = ARMISD::VCLEZ;
    else if (Opc == ARMISD::VCGT)
      Opc = ARMISD::VCLTZ;
    SingleOp = Op1;
  }

  SDValue Result;
  if (SingleOp.getNode()) {
    switch (Opc) {
    case ARMISD::VCEQ:
      Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCGE:
      Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCLEZ:
      Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCGT:
      Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
    case ARMISD::VCLTZ:
      Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
    default:
      // No compare-against-zero form for this opcode (e.g. the unsigned
      // compares); emit the generic two-operand node.
      Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
    }
  } else {
    Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
  }

  // The compare was performed in the integer lane type; convert to the
  // requested result type.
  Result = DAG.getSExtOrTrunc(Result, dl, VT);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

/// LowerSETCCE - Lower a SETCC-with-carry-in to an ARMISD::SUBE whose flags
/// feed an ARMISD::CMOV selecting between the constants 0 and 1.
static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");

  assert(Carry.getOpcode() != ISD::CARRY_FALSE);
  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  // Subtract with carry-in; value #1 of the result is the flags output.
  SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);

  SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
  SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
  SDValue ARMcc = DAG.getConstant(
      IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  // Copy the flags result into CPSR so the CMOV below can consume it.
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
                                   Cmp.getValue(1), SDValue());
  return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
                     CCR, Chain.getValue(1));
}

/// isNEONModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON instruction with a "modified immediate"
/// operand (e.g., VMOV).  If so, return the encoded value.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 SDLoc dl, EVT &VT, bool is128Bits,
                                 NEONModImmType type) {
  unsigned OpCmode, Imm;

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8.  However, NEON modified
  // immediate instructions others than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK.  Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
4964 OpCmode = 0xa; 4965 Imm = SplatBits >> 8; 4966 break; 4967 } 4968 return SDValue(); 4969 4970 case 32: 4971 // NEON's 32-bit VMOV supports splat values where: 4972 // * only one byte is nonzero, or 4973 // * the least significant byte is 0xff and the second byte is nonzero, or 4974 // * the least significant 2 bytes are 0xff and the third is nonzero. 4975 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 4976 if ((SplatBits & ~0xff) == 0) { 4977 // Value = 0x000000nn: Op=x, Cmode=000x. 4978 OpCmode = 0; 4979 Imm = SplatBits; 4980 break; 4981 } 4982 if ((SplatBits & ~0xff00) == 0) { 4983 // Value = 0x0000nn00: Op=x, Cmode=001x. 4984 OpCmode = 0x2; 4985 Imm = SplatBits >> 8; 4986 break; 4987 } 4988 if ((SplatBits & ~0xff0000) == 0) { 4989 // Value = 0x00nn0000: Op=x, Cmode=010x. 4990 OpCmode = 0x4; 4991 Imm = SplatBits >> 16; 4992 break; 4993 } 4994 if ((SplatBits & ~0xff000000) == 0) { 4995 // Value = 0xnn000000: Op=x, Cmode=011x. 4996 OpCmode = 0x6; 4997 Imm = SplatBits >> 24; 4998 break; 4999 } 5000 5001 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 5002 if (type == OtherModImm) return SDValue(); 5003 5004 if ((SplatBits & ~0xffff) == 0 && 5005 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 5006 // Value = 0x0000nnff: Op=x, Cmode=1100. 5007 OpCmode = 0xc; 5008 Imm = SplatBits >> 8; 5009 break; 5010 } 5011 5012 if ((SplatBits & ~0xffffff) == 0 && 5013 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 5014 // Value = 0x00nnffff: Op=x, Cmode=1101. 5015 OpCmode = 0xd; 5016 Imm = SplatBits >> 16; 5017 break; 5018 } 5019 5020 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 5021 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 5022 // VMOV.I32. A (very) minor optimization would be to replicate the value 5023 // and fall through here to test for a valid 64-bit splat. But, then the 5024 // caller would also need to check and handle the change in size. 
5025 return SDValue(); 5026 5027 case 64: { 5028 if (type != VMOVModImm) 5029 return SDValue(); 5030 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 5031 uint64_t BitMask = 0xff; 5032 uint64_t Val = 0; 5033 unsigned ImmMask = 1; 5034 Imm = 0; 5035 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 5036 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 5037 Val |= BitMask; 5038 Imm |= ImmMask; 5039 } else if ((SplatBits & BitMask) != 0) { 5040 return SDValue(); 5041 } 5042 BitMask <<= 8; 5043 ImmMask <<= 1; 5044 } 5045 5046 if (DAG.getDataLayout().isBigEndian()) 5047 // swap higher and lower 32 bit word 5048 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4); 5049 5050 // Op=1, Cmode=1110. 5051 OpCmode = 0x1e; 5052 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 5053 break; 5054 } 5055 5056 default: 5057 llvm_unreachable("unexpected size for isNEONModifiedImm"); 5058 } 5059 5060 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 5061 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); 5062 } 5063 5064 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 5065 const ARMSubtarget *ST) const { 5066 if (!ST->hasVFP3()) 5067 return SDValue(); 5068 5069 bool IsDouble = Op.getValueType() == MVT::f64; 5070 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 5071 5072 // Use the default (constant pool) lowering for double constants when we have 5073 // an SP-only FPU 5074 if (IsDouble && Subtarget->isFPOnlySP()) 5075 return SDValue(); 5076 5077 // Try splatting with a VMOV.f32... 5078 const APFloat &FPVal = CFP->getValueAPF(); 5079 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 5080 5081 if (ImmVal != -1) { 5082 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 5083 // We have code in place to select a valid ConstantFP already, no need to 5084 // do any mangling. 5085 return Op; 5086 } 5087 5088 // It's a float and we are trying to use NEON operations where 5089 // possible. 
Lower it to a splat followed by an extract. 5090 SDLoc DL(Op); 5091 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); 5092 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 5093 NewVal); 5094 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 5095 DAG.getConstant(0, DL, MVT::i32)); 5096 } 5097 5098 // The rest of our options are NEON only, make sure that's allowed before 5099 // proceeding.. 5100 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 5101 return SDValue(); 5102 5103 EVT VMovVT; 5104 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 5105 5106 // It wouldn't really be worth bothering for doubles except for one very 5107 // important value, which does happen to match: 0.0. So make sure we don't do 5108 // anything stupid. 5109 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) 5110 return SDValue(); 5111 5112 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). 5113 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), 5114 VMovVT, false, VMOVModImm); 5115 if (NewVal != SDValue()) { 5116 SDLoc DL(Op); 5117 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 5118 NewVal); 5119 if (IsDouble) 5120 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5121 5122 // It's a float: cast and extract a vector element. 
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // Finally, try a VMVN.i32
  NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
                             false, VMVNModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);

    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                      VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // No single-instruction encoding matched; use the default lowering.
  return SDValue();
}

// check if a VEXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.  On success Imm receives the
// index of the first element (the VEXT immediate).
static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, just follow it
    // back to index zero and keep going.
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  return true;
}


// check if a VEXT instruction with two (distinct) sources can handle the
// shuffle mask.  On success Imm receives the VEXT immediate, and ReverseVEXT
// is set when the two source operands must be swapped.
static bool isVEXTMask(ArrayRef<int> M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
5219 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 5220 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 5221 "Only possible block sizes for VREV are: 16, 32, 64"); 5222 5223 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5224 if (EltSz == 64) 5225 return false; 5226 5227 unsigned NumElts = VT.getVectorNumElements(); 5228 unsigned BlockElts = M[0] + 1; 5229 // If the first shuffle index is UNDEF, be optimistic. 5230 if (M[0] < 0) 5231 BlockElts = BlockSize / EltSz; 5232 5233 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 5234 return false; 5235 5236 for (unsigned i = 0; i < NumElts; ++i) { 5237 if (M[i] < 0) continue; // ignore UNDEF indices 5238 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 5239 return false; 5240 } 5241 5242 return true; 5243 } 5244 5245 static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 5246 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 5247 // range, then 0 is placed into the resulting vector. So pretty much any mask 5248 // of 8 elements can work here. 5249 return VT == MVT::v8i8 && M.size() == 8; 5250 } 5251 5252 // Checks whether the shuffle mask represents a vector transpose (VTRN) by 5253 // checking that pairs of elements in the shuffle mask represent the same index 5254 // in each vector, incrementing the expected index by 2 at each step. 5255 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] 5256 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} 5257 // v2={e,f,g,h} 5258 // WhichResult gives the offset for each element in the mask based on which 5259 // of the two results it belongs to. 
5260 // 5261 // The transpose can be represented either as: 5262 // result1 = shufflevector v1, v2, result1_shuffle_mask 5263 // result2 = shufflevector v1, v2, result2_shuffle_mask 5264 // where v1/v2 and the shuffle masks have the same number of elements 5265 // (here WhichResult (see below) indicates which result is being checked) 5266 // 5267 // or as: 5268 // results = shufflevector v1, v2, shuffle_mask 5269 // where both results are returned in one vector and the shuffle mask has twice 5270 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we 5271 // want to check the low half and high half of the shuffle mask as if it were 5272 // the other case 5273 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5274 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5275 if (EltSz == 64) 5276 return false; 5277 5278 unsigned NumElts = VT.getVectorNumElements(); 5279 if (M.size() != NumElts && M.size() != NumElts*2) 5280 return false; 5281 5282 // If the mask is twice as long as the input vector then we need to check the 5283 // upper and lower parts of the mask with a matching value for WhichResult 5284 // FIXME: A mask with only even values will be rejected in case the first 5285 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only 5286 // M[0] is used to determine WhichResult 5287 for (unsigned i = 0; i < M.size(); i += NumElts) { 5288 if (M.size() == NumElts * 2) 5289 WhichResult = i / NumElts; 5290 else 5291 WhichResult = M[i] == 0 ? 
0 : 1; 5292 for (unsigned j = 0; j < NumElts; j += 2) { 5293 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 5294 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) 5295 return false; 5296 } 5297 } 5298 5299 if (M.size() == NumElts*2) 5300 WhichResult = 0; 5301 5302 return true; 5303 } 5304 5305 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 5306 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5307 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 5308 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5309 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5310 if (EltSz == 64) 5311 return false; 5312 5313 unsigned NumElts = VT.getVectorNumElements(); 5314 if (M.size() != NumElts && M.size() != NumElts*2) 5315 return false; 5316 5317 for (unsigned i = 0; i < M.size(); i += NumElts) { 5318 if (M.size() == NumElts * 2) 5319 WhichResult = i / NumElts; 5320 else 5321 WhichResult = M[i] == 0 ? 0 : 1; 5322 for (unsigned j = 0; j < NumElts; j += 2) { 5323 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 5324 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) 5325 return false; 5326 } 5327 } 5328 5329 if (M.size() == NumElts*2) 5330 WhichResult = 0; 5331 5332 return true; 5333 } 5334 5335 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking 5336 // that the mask elements are either all even and in steps of size 2 or all odd 5337 // and in steps of size 2. 5338 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] 5339 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} 5340 // v2={e,f,g,h} 5341 // Requires similar checks to that of isVTRNMask with 5342 // respect the how results are returned. 
5343 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5344 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5345 if (EltSz == 64) 5346 return false; 5347 5348 unsigned NumElts = VT.getVectorNumElements(); 5349 if (M.size() != NumElts && M.size() != NumElts*2) 5350 return false; 5351 5352 for (unsigned i = 0; i < M.size(); i += NumElts) { 5353 WhichResult = M[i] == 0 ? 0 : 1; 5354 for (unsigned j = 0; j < NumElts; ++j) { 5355 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) 5356 return false; 5357 } 5358 } 5359 5360 if (M.size() == NumElts*2) 5361 WhichResult = 0; 5362 5363 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5364 if (VT.is64BitVector() && EltSz == 32) 5365 return false; 5366 5367 return true; 5368 } 5369 5370 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 5371 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5372 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 5373 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5374 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5375 if (EltSz == 64) 5376 return false; 5377 5378 unsigned NumElts = VT.getVectorNumElements(); 5379 if (M.size() != NumElts && M.size() != NumElts*2) 5380 return false; 5381 5382 unsigned Half = NumElts / 2; 5383 for (unsigned i = 0; i < M.size(); i += NumElts) { 5384 WhichResult = M[i] == 0 ? 0 : 1; 5385 for (unsigned j = 0; j < NumElts; j += Half) { 5386 unsigned Idx = WhichResult; 5387 for (unsigned k = 0; k < Half; ++k) { 5388 int MIdx = M[i + j + k]; 5389 if (MIdx >= 0 && (unsigned) MIdx != Idx) 5390 return false; 5391 Idx += 2; 5392 } 5393 } 5394 } 5395 5396 if (M.size() == NumElts*2) 5397 WhichResult = 0; 5398 5399 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
5400 if (VT.is64BitVector() && EltSz == 32) 5401 return false; 5402 5403 return true; 5404 } 5405 5406 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking 5407 // that pairs of elements of the shufflemask represent the same index in each 5408 // vector incrementing sequentially through the vectors. 5409 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] 5410 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} 5411 // v2={e,f,g,h} 5412 // Requires similar checks to that of isVTRNMask with respect the how results 5413 // are returned. 5414 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5415 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5416 if (EltSz == 64) 5417 return false; 5418 5419 unsigned NumElts = VT.getVectorNumElements(); 5420 if (M.size() != NumElts && M.size() != NumElts*2) 5421 return false; 5422 5423 for (unsigned i = 0; i < M.size(); i += NumElts) { 5424 WhichResult = M[i] == 0 ? 0 : 1; 5425 unsigned Idx = WhichResult * NumElts / 2; 5426 for (unsigned j = 0; j < NumElts; j += 2) { 5427 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 5428 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) 5429 return false; 5430 Idx += 1; 5431 } 5432 } 5433 5434 if (M.size() == NumElts*2) 5435 WhichResult = 0; 5436 5437 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5438 if (VT.is64BitVector() && EltSz == 32) 5439 return false; 5440 5441 return true; 5442 } 5443 5444 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 5445 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5446 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 
5447 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5448 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 5449 if (EltSz == 64) 5450 return false; 5451 5452 unsigned NumElts = VT.getVectorNumElements(); 5453 if (M.size() != NumElts && M.size() != NumElts*2) 5454 return false; 5455 5456 for (unsigned i = 0; i < M.size(); i += NumElts) { 5457 WhichResult = M[i] == 0 ? 0 : 1; 5458 unsigned Idx = WhichResult * NumElts / 2; 5459 for (unsigned j = 0; j < NumElts; j += 2) { 5460 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 5461 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) 5462 return false; 5463 Idx += 1; 5464 } 5465 } 5466 5467 if (M.size() == NumElts*2) 5468 WhichResult = 0; 5469 5470 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5471 if (VT.is64BitVector() && EltSz == 32) 5472 return false; 5473 5474 return true; 5475 } 5476 5477 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), 5478 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. 5479 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, 5480 unsigned &WhichResult, 5481 bool &isV_UNDEF) { 5482 isV_UNDEF = false; 5483 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 5484 return ARMISD::VTRN; 5485 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 5486 return ARMISD::VUZP; 5487 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 5488 return ARMISD::VZIP; 5489 5490 isV_UNDEF = true; 5491 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5492 return ARMISD::VTRN; 5493 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5494 return ARMISD::VUZP; 5495 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5496 return ARMISD::VZIP; 5497 5498 return 0; 5499 } 5500 5501 /// \return true if this is a reverse operation on an vector. 
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
    return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, SDLoc dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    // Thumb1: accept 8-bit values, or values whose complement is 8-bit.
    // NOTE(review): getZExtValue() of a 32-bit constant never has the upper
    // 32 bits set, so the ~Val <= 255 case looks unreachable here — confirm.
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, dl, MVT::i32);
  } else {
    // ARM/Thumb2: accept any valid modified immediate, directly (MOV) or
    // complemented (MVN).
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, dl, MVT::i32);
  }
  return SDValue();
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VmovVT, VT.is128BitVector(),
                                      VMOVModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isNEONModifiedImm(NegatedImm,
                              SplatUndef.getZExtValue(), SplatBitSize,
                              DAG, dl, VmovVT, VT.is128BitVector(),
                              VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
      if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
        int ImmVal = ARM_AM::getFP32Imm(SplatBits);
        if (ImmVal != -1) {
          SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
          return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
        }
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  //
  // As an optimisation, even if more than one value is used it may be more
  // profitable to splat with one value then change some lanes.
  //
  // Heuristically we decide to do this if the vector has a "dominant" value,
  // defined as splatted to more than half of the lanes.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;   // only operand 0 is non-undef
  bool usesOnlyOneValue = true;   // all non-undef operands are identical
  bool hasDominantValue = false;  // some value fills > half the lanes
  bool isConstant = true;         // all operands are constant int/FP

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  // No dominant value: arbitrarily pick the first distinct value seen.
  if (!Value.getNode() && ValueCounts.size() > 0)
    Value = ValueCounts.begin()->first;

  // Every lane was undef.
  if (ValueCounts.size() == 0)
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
  // Keep going if we are hitting this case.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();

  // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
  // i32 and try again.
  if (hasDominantValue && EltSize <= 32) {
    if (!isConstant) {
      SDValue N;

      // If we are VDUPing a value that comes directly from a vector, that will
      // cause an unnecessary move to and from a GPR, where instead we could
      // just use VDUPLANE. We can only do this if the lane being extracted
      // is at a constant index, as the VDUP from lane instructions only have
      // constant-index forms.
      ConstantSDNode *constIndex;
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
        // We need to create a new undef vector to use for the VDUPLANE if the
        // size of the vector from which we get the value is different than the
        // size of the vector that we need to create. We will insert the element
        // such that the register coalescer will remove unnecessary copies.
        if (VT != Value->getOperand(0).getValueType()) {
          unsigned index = constIndex->getAPIntValue().getLimitedValue() %
                             VT.getVectorNumElements();
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
                        Value, DAG.getConstant(index, dl, MVT::i32)),
                           DAG.getConstant(index, dl, MVT::i32));
        } else
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                          Value->getOperand(0), Value->getOperand(1));
      } else
        N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
        }
      }
      return N;
    }
    if (VT.getVectorElementType().isFloatingPoint()) {
      // Constant FP lanes: bitcast each lane to i32 and recurse, so the
      // immediate VMOV/VMVN paths above get a chance to match.
      SmallVector<SDValue, 8> Ops;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
      SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    if (usesOnlyOneValue) {
      // A splat of a constant that fits in a single MOV: materialize it in a
      // GPR and VDUP.
      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
      if (isConstant && Val.getNode())
        return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
  if (NumElts >= 4) {
    SDValue shuffle = ReconstructShuffle(Op, DAG);
    if (shuffle != SDValue())
      return shuffle;
  }

  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0 ; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.isUndef())
        continue;
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  // Bookkeeping for one source vector feeding this BUILD_VECTOR.
  struct ShuffleSourceInfo {
    SDValue Vec;
    unsigned MinElt;
    unsigned MaxElt;

    // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
    // be compatible with the shuffle we intend to construct. As a result
    // ShuffleVec will be some sliding window into the original Vec.
5762 SDValue ShuffleVec; 5763 5764 // Code should guarantee that element i in Vec starts at element "WindowBase 5765 // + i * WindowScale in ShuffleVec". 5766 int WindowBase; 5767 int WindowScale; 5768 5769 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } 5770 ShuffleSourceInfo(SDValue Vec) 5771 : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0), 5772 WindowScale(1) {} 5773 }; 5774 5775 // First gather all vectors used as an immediate source for this BUILD_VECTOR 5776 // node. 5777 SmallVector<ShuffleSourceInfo, 2> Sources; 5778 for (unsigned i = 0; i < NumElts; ++i) { 5779 SDValue V = Op.getOperand(i); 5780 if (V.isUndef()) 5781 continue; 5782 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 5783 // A shuffle can only come from building a vector from various 5784 // elements of other vectors. 5785 return SDValue(); 5786 } else if (!isa<ConstantSDNode>(V.getOperand(1))) { 5787 // Furthermore, shuffles require a constant mask, whereas extractelts 5788 // accept variable indices. 5789 return SDValue(); 5790 } 5791 5792 // Add this element source to the list if it's not already there. 5793 SDValue SourceVec = V.getOperand(0); 5794 auto Source = std::find(Sources.begin(), Sources.end(), SourceVec); 5795 if (Source == Sources.end()) 5796 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); 5797 5798 // Update the minimum and maximum lane number seen. 5799 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 5800 Source->MinElt = std::min(Source->MinElt, EltNo); 5801 Source->MaxElt = std::max(Source->MaxElt, EltNo); 5802 } 5803 5804 // Currently only do something sane when at most two source vectors 5805 // are involved. 5806 if (Sources.size() > 2) 5807 return SDValue(); 5808 5809 // Find out the smallest element size among result and two sources, and use 5810 // it as element size to build the shuffle_vector. 
5811 EVT SmallestEltTy = VT.getVectorElementType(); 5812 for (auto &Source : Sources) { 5813 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); 5814 if (SrcEltTy.bitsLT(SmallestEltTy)) 5815 SmallestEltTy = SrcEltTy; 5816 } 5817 unsigned ResMultiplier = 5818 VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits(); 5819 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); 5820 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); 5821 5822 // If the source vector is too wide or too narrow, we may nevertheless be able 5823 // to construct a compatible shuffle either by concatenating it with UNDEF or 5824 // extracting a suitable range of elements. 5825 for (auto &Src : Sources) { 5826 EVT SrcVT = Src.ShuffleVec.getValueType(); 5827 5828 if (SrcVT.getSizeInBits() == VT.getSizeInBits()) 5829 continue; 5830 5831 // This stage of the search produces a source with the same element type as 5832 // the original, but with a total width matching the BUILD_VECTOR output. 5833 EVT EltVT = SrcVT.getVectorElementType(); 5834 unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits(); 5835 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); 5836 5837 if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { 5838 if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits()) 5839 return SDValue(); 5840 // We can pad out the smaller vector for free, so if it's part of a 5841 // shuffle... 
5842 Src.ShuffleVec = 5843 DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, 5844 DAG.getUNDEF(Src.ShuffleVec.getValueType())); 5845 continue; 5846 } 5847 5848 if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits()) 5849 return SDValue(); 5850 5851 if (Src.MaxElt - Src.MinElt >= NumSrcElts) { 5852 // Span too large for a VEXT to cope 5853 return SDValue(); 5854 } 5855 5856 if (Src.MinElt >= NumSrcElts) { 5857 // The extraction can just take the second half 5858 Src.ShuffleVec = 5859 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5860 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 5861 Src.WindowBase = -NumSrcElts; 5862 } else if (Src.MaxElt < NumSrcElts) { 5863 // The extraction can just take the first half 5864 Src.ShuffleVec = 5865 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5866 DAG.getConstant(0, dl, MVT::i32)); 5867 } else { 5868 // An actual VEXT is needed 5869 SDValue VEXTSrc1 = 5870 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5871 DAG.getConstant(0, dl, MVT::i32)); 5872 SDValue VEXTSrc2 = 5873 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 5874 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 5875 5876 Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, 5877 VEXTSrc2, 5878 DAG.getConstant(Src.MinElt, dl, MVT::i32)); 5879 Src.WindowBase = -Src.MinElt; 5880 } 5881 } 5882 5883 // Another possible incompatibility occurs from the vector element types. We 5884 // can fix this by bitcasting the source vectors to the same type we intend 5885 // for the shuffle. 
5886 for (auto &Src : Sources) { 5887 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); 5888 if (SrcEltTy == SmallestEltTy) 5889 continue; 5890 assert(ShuffleVT.getVectorElementType() == SmallestEltTy); 5891 Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); 5892 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); 5893 Src.WindowBase *= Src.WindowScale; 5894 } 5895 5896 // Final sanity check before we try to actually produce a shuffle. 5897 DEBUG( 5898 for (auto Src : Sources) 5899 assert(Src.ShuffleVec.getValueType() == ShuffleVT); 5900 ); 5901 5902 // The stars all align, our next step is to produce the mask for the shuffle. 5903 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); 5904 int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits(); 5905 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 5906 SDValue Entry = Op.getOperand(i); 5907 if (Entry.isUndef()) 5908 continue; 5909 5910 auto Src = std::find(Sources.begin(), Sources.end(), Entry.getOperand(0)); 5911 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); 5912 5913 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit 5914 // trunc. So only std::min(SrcBits, DestBits) actually get defined in this 5915 // segment. 5916 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); 5917 int BitsDefined = std::min(OrigEltTy.getSizeInBits(), 5918 VT.getVectorElementType().getSizeInBits()); 5919 int LanesDefined = BitsDefined / BitsPerShuffleLane; 5920 5921 // This source is expected to fill ResMultiplier lanes of the final shuffle, 5922 // starting at the appropriate offset. 
5923 int *LaneMask = &Mask[i * ResMultiplier]; 5924 5925 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; 5926 ExtractBase += NumElts * (Src - Sources.begin()); 5927 for (int j = 0; j < LanesDefined; ++j) 5928 LaneMask[j] = ExtractBase + j; 5929 } 5930 5931 // Final check before we try to produce nonsense... 5932 if (!isShuffleMaskLegal(Mask, ShuffleVT)) 5933 return SDValue(); 5934 5935 // We can't handle more than two sources. This should have already 5936 // been checked before this point. 5937 assert(Sources.size() <= 2 && "Too many sources!"); 5938 5939 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; 5940 for (unsigned i = 0; i < Sources.size(); ++i) 5941 ShuffleOps[i] = Sources[i].ShuffleVec; 5942 5943 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], 5944 ShuffleOps[1], &Mask[0]); 5945 return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 5946 } 5947 5948 /// isShuffleMaskLegal - Targets can use this to indicate that they only 5949 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5950 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5951 /// are assumed to be legal. 5952 bool 5953 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 5954 EVT VT) const { 5955 if (VT.getVectorNumElements() == 4 && 5956 (VT.is128BitVector() || VT.is64BitVector())) { 5957 unsigned PFIndexes[4]; 5958 for (unsigned i = 0; i != 4; ++i) { 5959 if (M[i] < 0) 5960 PFIndexes[i] = 8; 5961 else 5962 PFIndexes[i] = M[i]; 5963 } 5964 5965 // Compute the index in the perfect shuffle table. 
5966 unsigned PFTableIndex = 5967 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5968 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5969 unsigned Cost = (PFEntry >> 30); 5970 5971 if (Cost <= 4) 5972 return true; 5973 } 5974 5975 bool ReverseVEXT, isV_UNDEF; 5976 unsigned Imm, WhichResult; 5977 5978 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5979 return (EltSize >= 32 || 5980 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 5981 isVREVMask(M, VT, 64) || 5982 isVREVMask(M, VT, 32) || 5983 isVREVMask(M, VT, 16) || 5984 isVEXTMask(M, VT, ReverseVEXT, Imm) || 5985 isVTBLMask(M, VT) || 5986 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) || 5987 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); 5988 } 5989 5990 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5991 /// the specified operations to build the shuffle. 5992 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5993 SDValue RHS, SelectionDAG &DAG, 5994 SDLoc dl) { 5995 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5996 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5997 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5998 5999 enum { 6000 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 6001 OP_VREV, 6002 OP_VDUP0, 6003 OP_VDUP1, 6004 OP_VDUP2, 6005 OP_VDUP3, 6006 OP_VEXT1, 6007 OP_VEXT2, 6008 OP_VEXT3, 6009 OP_VUZPL, // VUZP, left result 6010 OP_VUZPR, // VUZP, right result 6011 OP_VZIPL, // VZIP, left result 6012 OP_VZIPR, // VZIP, right result 6013 OP_VTRNL, // VTRN, left result 6014 OP_VTRNR // VTRN, right result 6015 }; 6016 6017 if (OpNum == OP_COPY) { 6018 if (LHSID == (1*9+2)*9+3) return LHS; 6019 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 6020 return RHS; 6021 } 6022 6023 SDValue OpLHS, OpRHS; 6024 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 6025 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, 
RHS, DAG, dl); 6026 EVT VT = OpLHS.getValueType(); 6027 6028 switch (OpNum) { 6029 default: llvm_unreachable("Unknown shuffle opcode!"); 6030 case OP_VREV: 6031 // VREV divides the vector in half and swaps within the half. 6032 if (VT.getVectorElementType() == MVT::i32 || 6033 VT.getVectorElementType() == MVT::f32) 6034 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 6035 // vrev <4 x i16> -> VREV32 6036 if (VT.getVectorElementType() == MVT::i16) 6037 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 6038 // vrev <4 x i8> -> VREV16 6039 assert(VT.getVectorElementType() == MVT::i8); 6040 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 6041 case OP_VDUP0: 6042 case OP_VDUP1: 6043 case OP_VDUP2: 6044 case OP_VDUP3: 6045 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6046 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); 6047 case OP_VEXT1: 6048 case OP_VEXT2: 6049 case OP_VEXT3: 6050 return DAG.getNode(ARMISD::VEXT, dl, VT, 6051 OpLHS, OpRHS, 6052 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); 6053 case OP_VUZPL: 6054 case OP_VUZPR: 6055 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 6056 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 6057 case OP_VZIPL: 6058 case OP_VZIPR: 6059 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 6060 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 6061 case OP_VTRNL: 6062 case OP_VTRNR: 6063 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 6064 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 6065 } 6066 } 6067 6068 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 6069 ArrayRef<int> ShuffleMask, 6070 SelectionDAG &DAG) { 6071 // Check to see if we can use the VTBL instruction. 
6072 SDValue V1 = Op.getOperand(0); 6073 SDValue V2 = Op.getOperand(1); 6074 SDLoc DL(Op); 6075 6076 SmallVector<SDValue, 8> VTBLMask; 6077 for (ArrayRef<int>::iterator 6078 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 6079 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32)); 6080 6081 if (V2.getNode()->isUndef()) 6082 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 6083 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); 6084 6085 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 6086 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); 6087 } 6088 6089 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, 6090 SelectionDAG &DAG) { 6091 SDLoc DL(Op); 6092 SDValue OpLHS = Op.getOperand(0); 6093 EVT VT = OpLHS.getValueType(); 6094 6095 assert((VT == MVT::v8i16 || VT == MVT::v16i8) && 6096 "Expect an v8i16/v16i8 type"); 6097 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS); 6098 // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now, 6099 // extract the first 8 bytes into the top double word and the last 8 bytes 6100 // into the bottom double word. The v8i16 case is similar. 6101 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4; 6102 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, 6103 DAG.getConstant(ExtractNum, DL, MVT::i32)); 6104 } 6105 6106 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 6107 SDValue V1 = Op.getOperand(0); 6108 SDValue V2 = Op.getOperand(1); 6109 SDLoc dl(Op); 6110 EVT VT = Op.getValueType(); 6111 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 6112 6113 // Convert shuffles that are directly supported on NEON to target-specific 6114 // DAG nodes, instead of keeping them as shuffles and matching them again 6115 // during code selection. This is more efficient and avoids the possibility 6116 // of inconsistencies between legalization and selection. 
6117 // FIXME: floating-point vectors should be canonicalized to integer vectors 6118 // of the same time so that they get CSEd properly. 6119 ArrayRef<int> ShuffleMask = SVN->getMask(); 6120 6121 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 6122 if (EltSize <= 32) { 6123 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 6124 int Lane = SVN->getSplatIndex(); 6125 // If this is undef splat, generate it via "just" vdup, if possible. 6126 if (Lane == -1) Lane = 0; 6127 6128 // Test if V1 is a SCALAR_TO_VECTOR. 6129 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 6130 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 6131 } 6132 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 6133 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 6134 // reaches it). 6135 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 6136 !isa<ConstantSDNode>(V1.getOperand(0))) { 6137 bool IsScalarToVector = true; 6138 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 6139 if (!V1.getOperand(i).isUndef()) { 6140 IsScalarToVector = false; 6141 break; 6142 } 6143 if (IsScalarToVector) 6144 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 6145 } 6146 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 6147 DAG.getConstant(Lane, dl, MVT::i32)); 6148 } 6149 6150 bool ReverseVEXT; 6151 unsigned Imm; 6152 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 6153 if (ReverseVEXT) 6154 std::swap(V1, V2); 6155 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 6156 DAG.getConstant(Imm, dl, MVT::i32)); 6157 } 6158 6159 if (isVREVMask(ShuffleMask, VT, 64)) 6160 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 6161 if (isVREVMask(ShuffleMask, VT, 32)) 6162 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 6163 if (isVREVMask(ShuffleMask, VT, 16)) 6164 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 6165 6166 if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 6167 return 
DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 6168 DAG.getConstant(Imm, dl, MVT::i32)); 6169 } 6170 6171 // Check for Neon shuffles that modify both input vectors in place. 6172 // If both results are used, i.e., if there are two shuffles with the same 6173 // source operands and with masks corresponding to both results of one of 6174 // these operations, DAG memoization will ensure that a single node is 6175 // used for both shuffles. 6176 unsigned WhichResult; 6177 bool isV_UNDEF; 6178 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 6179 ShuffleMask, VT, WhichResult, isV_UNDEF)) { 6180 if (isV_UNDEF) 6181 V2 = V1; 6182 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) 6183 .getValue(WhichResult); 6184 } 6185 6186 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize 6187 // shuffles that produce a result larger than their operands with: 6188 // shuffle(concat(v1, undef), concat(v2, undef)) 6189 // -> 6190 // shuffle(concat(v1, v2), undef) 6191 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). 6192 // 6193 // This is useful in the general case, but there are special cases where 6194 // native shuffles produce larger results: the two-result ops. 6195 // 6196 // Look through the concat when lowering them: 6197 // shuffle(concat(v1, v2), undef) 6198 // -> 6199 // concat(VZIP(v1, v2):0, :1) 6200 // 6201 if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { 6202 SDValue SubV1 = V1->getOperand(0); 6203 SDValue SubV2 = V1->getOperand(1); 6204 EVT SubVT = SubV1.getValueType(); 6205 6206 // We expect these to have been canonicalized to -1. 
6207 assert(std::all_of(ShuffleMask.begin(), ShuffleMask.end(), [&](int i) { 6208 return i < (int)VT.getVectorNumElements(); 6209 }) && "Unexpected shuffle index into UNDEF operand!"); 6210 6211 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 6212 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { 6213 if (isV_UNDEF) 6214 SubV2 = SubV1; 6215 assert((WhichResult == 0) && 6216 "In-place shuffle of concat can only have one result!"); 6217 SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), 6218 SubV1, SubV2); 6219 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), 6220 Res.getValue(1)); 6221 } 6222 } 6223 } 6224 6225 // If the shuffle is not directly supported and it has 4 elements, use 6226 // the PerfectShuffle-generated table to synthesize it from other shuffles. 6227 unsigned NumElts = VT.getVectorNumElements(); 6228 if (NumElts == 4) { 6229 unsigned PFIndexes[4]; 6230 for (unsigned i = 0; i != 4; ++i) { 6231 if (ShuffleMask[i] < 0) 6232 PFIndexes[i] = 8; 6233 else 6234 PFIndexes[i] = ShuffleMask[i]; 6235 } 6236 6237 // Compute the index in the perfect shuffle table. 6238 unsigned PFTableIndex = 6239 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 6240 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 6241 unsigned Cost = (PFEntry >> 30); 6242 6243 if (Cost <= 4) 6244 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 6245 } 6246 6247 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 6248 if (EltSize >= 32) { 6249 // Do the expansion with floating-point types, since that is what the VFP 6250 // registers are defined to use, and since i64 is not legal. 
6251 EVT EltVT = EVT::getFloatingPointVT(EltSize); 6252 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 6253 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 6254 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 6255 SmallVector<SDValue, 8> Ops; 6256 for (unsigned i = 0; i < NumElts; ++i) { 6257 if (ShuffleMask[i] < 0) 6258 Ops.push_back(DAG.getUNDEF(EltVT)); 6259 else 6260 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6261 ShuffleMask[i] < (int)NumElts ? V1 : V2, 6262 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 6263 dl, MVT::i32))); 6264 } 6265 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 6266 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6267 } 6268 6269 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) 6270 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); 6271 6272 if (VT == MVT::v8i8) 6273 if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) 6274 return NewOp; 6275 6276 return SDValue(); 6277 } 6278 6279 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 6280 // INSERT_VECTOR_ELT is legal only for immediate indexes. 6281 SDValue Lane = Op.getOperand(2); 6282 if (!isa<ConstantSDNode>(Lane)) 6283 return SDValue(); 6284 6285 return Op; 6286 } 6287 6288 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 6289 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
6290 SDValue Lane = Op.getOperand(1); 6291 if (!isa<ConstantSDNode>(Lane)) 6292 return SDValue(); 6293 6294 SDValue Vec = Op.getOperand(0); 6295 if (Op.getValueType() == MVT::i32 && 6296 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 6297 SDLoc dl(Op); 6298 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 6299 } 6300 6301 return Op; 6302 } 6303 6304 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 6305 // The only time a CONCAT_VECTORS operation can have legal types is when 6306 // two 64-bit vectors are concatenated to a 128-bit vector. 6307 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 6308 "unexpected CONCAT_VECTORS"); 6309 SDLoc dl(Op); 6310 SDValue Val = DAG.getUNDEF(MVT::v2f64); 6311 SDValue Op0 = Op.getOperand(0); 6312 SDValue Op1 = Op.getOperand(1); 6313 if (!Op0.isUndef()) 6314 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 6315 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 6316 DAG.getIntPtrConstant(0, dl)); 6317 if (!Op1.isUndef()) 6318 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 6319 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 6320 DAG.getIntPtrConstant(1, dl)); 6321 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 6322 } 6323 6324 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 6325 /// element has been zero/sign-extended, depending on the isSigned parameter, 6326 /// from an integer type half its size. 6327 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 6328 bool isSigned) { 6329 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 6330 EVT VT = N->getValueType(0); 6331 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 6332 SDNode *BVN = N->getOperand(0).getNode(); 6333 if (BVN->getValueType(0) != MVT::v4i32 || 6334 BVN->getOpcode() != ISD::BUILD_VECTOR) 6335 return false; 6336 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 
1 : 0; 6337 unsigned HiElt = 1 - LoElt; 6338 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 6339 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 6340 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 6341 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 6342 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 6343 return false; 6344 if (isSigned) { 6345 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 6346 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 6347 return true; 6348 } else { 6349 if (Hi0->isNullValue() && Hi1->isNullValue()) 6350 return true; 6351 } 6352 return false; 6353 } 6354 6355 if (N->getOpcode() != ISD::BUILD_VECTOR) 6356 return false; 6357 6358 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 6359 SDNode *Elt = N->getOperand(i).getNode(); 6360 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 6361 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 6362 unsigned HalfSize = EltSize / 2; 6363 if (isSigned) { 6364 if (!isIntN(HalfSize, C->getSExtValue())) 6365 return false; 6366 } else { 6367 if (!isUIntN(HalfSize, C->getZExtValue())) 6368 return false; 6369 } 6370 continue; 6371 } 6372 return false; 6373 } 6374 6375 return true; 6376 } 6377 6378 /// isSignExtended - Check if a node is a vector value that is sign-extended 6379 /// or a constant BUILD_VECTOR with sign-extended elements. 6380 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 6381 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 6382 return true; 6383 if (isExtendedBUILD_VECTOR(N, DAG, true)) 6384 return true; 6385 return false; 6386 } 6387 6388 /// isZeroExtended - Check if a node is a vector value that is zero-extended 6389 /// or a constant BUILD_VECTOR with zero-extended elements. 
6390 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 6391 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 6392 return true; 6393 if (isExtendedBUILD_VECTOR(N, DAG, false)) 6394 return true; 6395 return false; 6396 } 6397 6398 static EVT getExtensionTo64Bits(const EVT &OrigVT) { 6399 if (OrigVT.getSizeInBits() >= 64) 6400 return OrigVT; 6401 6402 assert(OrigVT.isSimple() && "Expecting a simple value type"); 6403 6404 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; 6405 switch (OrigSimpleTy) { 6406 default: llvm_unreachable("Unexpected Vector Type"); 6407 case MVT::v2i8: 6408 case MVT::v2i16: 6409 return MVT::v2i32; 6410 case MVT::v4i8: 6411 return MVT::v4i16; 6412 } 6413 } 6414 6415 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total 6416 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. 6417 /// We insert the required extension here to get the vector to fill a D register. 6418 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, 6419 const EVT &OrigTy, 6420 const EVT &ExtTy, 6421 unsigned ExtOpcode) { 6422 // The vector originally had a size of OrigTy. It was then extended to ExtTy. 6423 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than 6424 // 64-bits we need to insert a new extension so that it will be 64-bits. 6425 assert(ExtTy.is128BitVector() && "Unexpected extension size"); 6426 if (OrigTy.getSizeInBits() >= 64) 6427 return N; 6428 6429 // Must extend size to at least 64 bits to be used as an operand for VMULL. 6430 EVT NewVT = getExtensionTo64Bits(OrigTy); 6431 6432 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); 6433 } 6434 6435 /// SkipLoadExtensionForVMULL - return a load of the original vector size that 6436 /// does not do any sign/zero extension. If the original vector is less 6437 /// than 64 bits, an appropriate extension will be added after the load to 6438 /// reach a total size of 64 bits. 
We have to add the extension separately 6439 /// because ARM does not have a sign/zero extending load for vectors. 6440 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { 6441 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); 6442 6443 // The load already has the right type. 6444 if (ExtendedTy == LD->getMemoryVT()) 6445 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), 6446 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 6447 LD->isNonTemporal(), LD->isInvariant(), 6448 LD->getAlignment()); 6449 6450 // We need to create a zextload/sextload. We cannot just create a load 6451 // followed by a zext/zext node because LowerMUL is also run during normal 6452 // operation legalization where we can't create illegal types. 6453 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, 6454 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), 6455 LD->getMemoryVT(), LD->isVolatile(), LD->isInvariant(), 6456 LD->isNonTemporal(), LD->getAlignment()); 6457 } 6458 6459 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, 6460 /// extending load, or BUILD_VECTOR with extended elements, return the 6461 /// unextended value. The unextended vector should be 64 bits so that it can 6462 /// be used as an operand to a VMULL instruction. If the original vector size 6463 /// before extension is less than 64 bits we add a an extension to resize 6464 /// the vector to 64 bits. 6465 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { 6466 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 6467 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, 6468 N->getOperand(0)->getValueType(0), 6469 N->getValueType(0), 6470 N->getOpcode()); 6471 6472 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 6473 return SkipLoadExtensionForVMULL(LD, DAG); 6474 6475 // Otherwise, the value must be a BUILD_VECTOR. 
For v2i64, it will 6476 // have been legalized as a BITCAST from v4i32. 6477 if (N->getOpcode() == ISD::BITCAST) { 6478 SDNode *BVN = N->getOperand(0).getNode(); 6479 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 6480 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 6481 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; 6482 return DAG.getBuildVector( 6483 MVT::v2i32, SDLoc(N), 6484 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); 6485 } 6486 // Construct a new BUILD_VECTOR with elements truncated to half the size. 6487 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 6488 EVT VT = N->getValueType(0); 6489 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 6490 unsigned NumElts = VT.getVectorNumElements(); 6491 MVT TruncVT = MVT::getIntegerVT(EltSize); 6492 SmallVector<SDValue, 8> Ops; 6493 SDLoc dl(N); 6494 for (unsigned i = 0; i != NumElts; ++i) { 6495 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 6496 const APInt &CInt = C->getAPIntValue(); 6497 // Element types smaller than 32 bits are not legal, so use i32 elements. 6498 // The values are implicitly truncated so sext vs. zext doesn't matter. 
6499 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); 6500 } 6501 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); 6502 } 6503 6504 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 6505 unsigned Opcode = N->getOpcode(); 6506 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 6507 SDNode *N0 = N->getOperand(0).getNode(); 6508 SDNode *N1 = N->getOperand(1).getNode(); 6509 return N0->hasOneUse() && N1->hasOneUse() && 6510 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 6511 } 6512 return false; 6513 } 6514 6515 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 6516 unsigned Opcode = N->getOpcode(); 6517 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 6518 SDNode *N0 = N->getOperand(0).getNode(); 6519 SDNode *N1 = N->getOperand(1).getNode(); 6520 return N0->hasOneUse() && N1->hasOneUse() && 6521 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 6522 } 6523 return false; 6524 } 6525 6526 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 6527 // Multiplications are only custom-lowered for 128-bit vectors so that 6528 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 6529 EVT VT = Op.getValueType(); 6530 assert(VT.is128BitVector() && VT.isInteger() && 6531 "unexpected type for custom-lowering ISD::MUL"); 6532 SDNode *N0 = Op.getOperand(0).getNode(); 6533 SDNode *N1 = Op.getOperand(1).getNode(); 6534 unsigned NewOpc = 0; 6535 bool isMLA = false; 6536 bool isN0SExt = isSignExtended(N0, DAG); 6537 bool isN1SExt = isSignExtended(N1, DAG); 6538 if (isN0SExt && isN1SExt) 6539 NewOpc = ARMISD::VMULLs; 6540 else { 6541 bool isN0ZExt = isZeroExtended(N0, DAG); 6542 bool isN1ZExt = isZeroExtended(N1, DAG); 6543 if (isN0ZExt && isN1ZExt) 6544 NewOpc = ARMISD::VMULLu; 6545 else if (isN1SExt || isN1ZExt) { 6546 // Look for (s/zext A + s/zext B) * (s/zext C). 
We want to turn these 6547 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 6548 if (isN1SExt && isAddSubSExt(N0, DAG)) { 6549 NewOpc = ARMISD::VMULLs; 6550 isMLA = true; 6551 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 6552 NewOpc = ARMISD::VMULLu; 6553 isMLA = true; 6554 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 6555 std::swap(N0, N1); 6556 NewOpc = ARMISD::VMULLu; 6557 isMLA = true; 6558 } 6559 } 6560 6561 if (!NewOpc) { 6562 if (VT == MVT::v2i64) 6563 // Fall through to expand this. It is not legal. 6564 return SDValue(); 6565 else 6566 // Other vector multiplications are legal. 6567 return Op; 6568 } 6569 } 6570 6571 // Legalize to a VMULL instruction. 6572 SDLoc DL(Op); 6573 SDValue Op0; 6574 SDValue Op1 = SkipExtensionForVMULL(N1, DAG); 6575 if (!isMLA) { 6576 Op0 = SkipExtensionForVMULL(N0, DAG); 6577 assert(Op0.getValueType().is64BitVector() && 6578 Op1.getValueType().is64BitVector() && 6579 "unexpected types for extended operands to VMULL"); 6580 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 6581 } 6582 6583 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 6584 // isel lowering to take advantage of no-stall back to back vmul + vmla. 6585 // vmull q0, d4, d6 6586 // vmlal q0, d5, d6 6587 // is faster than 6588 // vaddl q0, d4, d5 6589 // vmovl q1, d6 6590 // vmul q0, q0, q1 6591 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); 6592 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); 6593 EVT Op1VT = Op1.getValueType(); 6594 return DAG.getNode(N0->getOpcode(), DL, VT, 6595 DAG.getNode(NewOpc, DL, VT, 6596 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 6597 DAG.getNode(NewOpc, DL, VT, 6598 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 6599 } 6600 6601 static SDValue 6602 LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) { 6603 // TODO: Should this propagate fast-math-flags? 

  // Convert to float
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                  DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                  Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any newton steps.  This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  // The bias is added to the raw float bit pattern, not the float value.
  Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
  return X;
}

/// Lower a v4i16 SDIV via float: widen both operands to v4i32, convert to
/// v4f32, multiply the dividend by a once-refined reciprocal estimate of the
/// divisor (vrecpe + one vrecps Newton step), then convert back to v4i16.
static SDValue
LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  SDValue N2;
  // Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and one refinement step.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single newton step.  This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  // Bias applied to the float bit pattern (integer add on the bits).
  N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

/// Custom-lower ISD::SDIV for v4i16 and v8i8.  v8i8 is sign-extended to
/// v8i16, split into two v4i16 halves that go through LowerSDIV_v4i8, then
/// reassembled and truncated back to v8i8.  v4i16 goes straight to
/// LowerSDIV_v4i16.
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::SDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);

    // High halves (lanes 4-7) into N2/N3, low halves (lanes 0-3) into N0/N1.
    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
    return N0;
  }
  return LowerSDIV_v4i16(N0, N1, dl, DAG);
}

/// Custom-lower ISD::UDIV for v4i16 and v8i8.  v8i8 is zero-extended to
/// v8i16 and split; the halves reuse the *signed* v4i16 helper, which is
/// safe because zero-extended i8 values (0..255) are within signed i16
/// range, and the result is narrowed with vqmovnsu.  v4i16 uses an inline
/// float path with two reciprocal refinement steps.
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);

    // High halves into N2/N3, low halves into N0/N1 (same split as SDIV).
    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    // Signed helper is correct here: inputs are zero-extended i8 lanes.
    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
                                     MVT::i32),
                     N0);
    return N0;
  }

  // v4i16 sdiv ... Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
  // SINT_TO_FP is safe: zero-extended i16 lanes fit in signed i32 range.
  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and two refinement steps.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   BN1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

/// Map the generic carry-chain nodes onto their ARM equivalents.  ADDE/SUBE
/// take an extra carry-in operand; ADDC/SUBC do not.  Every node produces a
/// second i32 result carrying the flags.
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getNode()->getValueType(0);
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid code");
  case ISD::ADDC: Opc = ARMISD::ADDC; break;
  case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
  case ISD::SUBC: Opc = ARMISD::SUBC; break;
  case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
  }

  if (!ExtraOp)
    return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                       Op.getOperand(1));
  return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                     Op.getOperand(1), Op.getOperand(2));
}

/// Lower ISD::FSINCOS on Darwin by calling __sincos_stret/__sincosf_stret,
/// which computes sin and cos in one call.  Under the APCS ABI the result
/// pair is returned through an sret stack slot and loaded back; otherwise
/// the call's direct result is used.
SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin());

  // For iOS, we want to call an alternative entry point: __sincos_stret,
  // return values are passed via sret.
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Pair of floats / doubles used to pass the result.
  Type *RetTy = StructType::get(ArgTy, ArgTy, nullptr);
  auto &DL = DAG.getDataLayout();

  ArgListTy Args;
  bool ShouldUseSRet = Subtarget->isAPCS_ABI();
  SDValue SRet;
  if (ShouldUseSRet) {
    // Create stack object for sret.
    const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
    const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
    int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
    SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));

    // The sret pointer becomes the first (hidden) argument, and the call
    // itself then returns void.
    ArgListEntry Entry;
    Entry.Node = SRet;
    Entry.Ty = RetTy->getPointerTo();
    Entry.isSExt = false;
    Entry.isZExt = false;
    Entry.isSRet = true;
    Args.push_back(Entry);
    RetTy = Type::getVoidTy(*DAG.getContext());
  }

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.isSExt = false;
  Entry.isZExt = false;
  Args.push_back(Entry);

  const char *LibcallName =
      (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret";
  RTLIB::Libcall LC =
      (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32;
  CallingConv::ID CC = getLibcallCallingConv(LC);
  SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setCallee(CC, RetTy, Callee, std::move(Args), 0)
      .setDiscardResult(ShouldUseSRet);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  if (!ShouldUseSRet)
    return CallResult.first;

  // Reload the {sin, cos} pair from the sret slot, chained on the call.
  SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet,
                                MachinePointerInfo(), false, false, false, 0);

  // Address of cos field.
  SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
                            DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
  SDValue LoadCos = DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add,
                                MachinePointerInfo(), false, false, false, 0);

  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
                     LoadSin.getValue(0), LoadCos.getValue(0));
}

/// Emit a call to the Windows runtime division helper (__rt_sdiv[64] /
/// __rt_udiv[64]) for an i32 or i64 division.  Note the operands are pushed
/// in reverse order ({1, 0}): the runtime helpers take (divisor, dividend).
/// Chain is the incoming chain (typically a WIN__DBZCHK) the call is
/// sequenced after.
SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
                                                  bool Signed,
                                                  SDValue &Chain) const {
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  const char *Name = nullptr;
  if (Signed)
    Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
  else
    Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";

  SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));

  ARMTargetLowering::ArgListTy Args;

  // Divisor first, then dividend — the helper's expected argument order.
  for (auto AI : {1, 0}) {
    ArgListEntry Arg;
    Arg.Node = Op.getOperand(AI);
    Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Arg);
  }

  CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
                 ES, std::move(Args), 0);

  return LowerCallTo(CLI).first;
}

/// Custom-lower an i32 SDIV/UDIV on Windows: emit a divide-by-zero check
/// (WIN__DBZCHK) on the divisor, then call the runtime division helper
/// chained after the check.
SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
                                            bool Signed) const {
  assert(Op.getValueType() == MVT::i32 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
                               DAG.getEntryNode(), Op.getOperand(1));

  return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
}

/// Expand an illegal i64 SDIV/UDIV on Windows.  The 64-bit divisor is zero
/// iff (lo | hi) == 0, so the WIN__DBZCHK is fed that OR.  The 64-bit helper
/// result is split into two i32 halves pushed into Results.
void ARMTargetLowering::ExpandDIV_Windows(
    SDValue Op, SelectionDAG &DAG, bool Signed,
    SmallVectorImpl<SDValue> &Results) const {
  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Op.getValueType() == MVT::i64 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op.getOperand(1),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op.getOperand(1),
                           DAG.getConstant(1, dl, MVT::i32));
  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i32, Lo, Hi);

  SDValue DBZCHK =
      DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, DAG.getEntryNode(), Or);

  SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);

  // Split the i64 quotient into {lo, hi} i32 results.
  SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
  SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
                              DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
  Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);

  Results.push_back(Lower);
  Results.push_back(Upper);
}

/// Atomic loads/stores with monotonic (or weaker) ordering are legal as
/// plain loads/stores; anything stronger must be expanded (it needs barrier
/// support), signalled by returning an empty SDValue.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Acquire/Release load/store is not legal for targets without a dmb or
    // equivalent available.
    return SDValue();

  // Monotonic load/store is legal for all targets.
  return Op;
}

/// Expand READCYCLECOUNTER into an MRC read of the PMU cycle counter
/// (p15, c9, c13, #0) and zero-extend the 32-bit reading to the i64 result
/// via BUILD_PAIR.
static void ReplaceREADCYCLECOUNTER(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) {
  SDLoc DL(N);
  // Under Power Management extensions, the cycle-count is:
  //    mrc p15, #0, <Rt>, c9, c13, #0
  SDValue Ops[] = { N->getOperand(0), // Chain
                    DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                    DAG.getConstant(15, DL, MVT::i32),
                    DAG.getConstant(0, DL, MVT::i32),
                    DAG.getConstant(9, DL, MVT::i32),
                    DAG.getConstant(13, DL, MVT::i32),
                    DAG.getConstant(0, DL, MVT::i32)
  };

  SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                 DAG.getVTList(MVT::i32, MVT::Other), Ops);
  // Upper 32 bits of the i64 result are zero.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
                                DAG.getConstant(0, DL, MVT::i32)));
  Results.push_back(Cycles32.getValue(1));
}

/// Pack an i64 value V into a GPRPair register (gsub_0 = low 32 bits,
/// gsub_1 = high 32 bits) via a REG_SEQUENCE machine node.
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
  SDLoc dl(V.getNode());
  SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
  SDValue VHi = DAG.getAnyExtOrTrunc(
      DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
      dl, MVT::i32);
  SDValue RegClass =
      DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
  return SDValue(
      DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
}

/// Replace a 64-bit ATOMIC_CMP_SWAP with the CMP_SWAP_64 pseudo, which takes
/// the expected and new values as GPR pairs.  The i64 result is recovered by
/// extracting the two 32-bit subregisters; the pseudo's memory operand is
/// carried over from the original node.
static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> & Results,
                                      SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types less than 64 should be legal");
  // Operands: pointer, expected pair, new-value pair, chain.
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  // Preserve the original atomic's memory operand on the machine node.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);

  Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32,
                                               SDValue(CmpSwap, 0)));
  Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32,
                                               SDValue(CmpSwap, 0)));
  Results.push_back(SDValue(CmpSwap, 2));
}

/// Central dispatch for all operations marked Custom in the ARM backend.
/// Each case forwards to the dedicated lowering routine for that opcode.
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress:
    // Global addresses are lowered per object-file format.
    switch (Subtarget->getTargetTriple().getObjectFormat()) {
    default: llvm_unreachable("unknown object format");
    case Triple::COFF:
      return LowerGlobalAddressWindows(Op, DAG);
    case Triple::ELF:
      return LowerGlobalAddressELF(Op, DAG);
    case Triple::MachO:
      return LowerGlobalAddressDarwin(Op, DAG);
    }
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SREM: return LowerREM(Op.getNode(), DAG);
  case ISD::UREM: return LowerREM(Op.getNode(), DAG);
  case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
  case ISD::SETCC: return LowerVSETCC(Op, DAG);
  case ISD::SETCCE: return LowerSETCCE(Op, DAG);
  case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::MUL: return LowerMUL(Op, DAG);
  case ISD::SDIV:
    // Windows targets use the runtime division helpers.
    if (Subtarget->isTargetWindows())
      return LowerDIV_Windows(Op, DAG, /* Signed */ true);
    return LowerSDIV(Op, DAG);
  case ISD::UDIV:
    if (Subtarget->isTargetWindows())
      return LowerDIV_Windows(Op, DAG, /* Signed */ false);
    return LowerUDIV(Op, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
    return LowerXALUO(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM: return LowerDivRem(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
      return LowerDYNAMIC_STACKALLOC(Op, DAG);
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ARMISD::WIN__DBZCHK: return SDValue();
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDValue Res;
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::READ_REGISTER:
    ExpandREAD_REGISTER(N, Results, DAG);
    break;
  case ISD::BITCAST:
    Res = ExpandBITCAST(N, DAG);
    break;
  case ISD::SRL:
  case ISD::SRA:
    Res = Expand64BitShift(N, DAG, Subtarget);
    break;
  case ISD::SREM:
  case ISD::UREM:
    Res = LowerREM(N, DAG);
    break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // DivRem yields a two-result node; push both values explicitly.
    Res = LowerDivRem(SDValue(N, 0), DAG);
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    return;
  case ISD::READCYCLECOUNTER:
    ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
    return;
  case ISD::UDIV:
  case ISD::SDIV:
    assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
    return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
                             Results);
  case ISD::ATOMIC_CMP_SWAP:
    ReplaceCMP_SWAP_64Results(N, Results, DAG);
    return;
  }
  // Cases that fell through produce at most one replacement value.
  if (Res.getNode())
    Results.push_back(Res);
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::
SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB,
                       MachineBasicBlock *DispatchBB, int FI) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
  const Function *F = MF->getFunction();

  bool isThumb = Subtarget->isThumb();
  bool isThumb2 = Subtarget->isThumb2();

  // Materialize the dispatch block's address via a pc-relative constant-pool
  // entry; PCAdj accounts for the pipeline offset of pc reads (4 in Thumb
  // modes, 8 in ARM mode).
  unsigned PCLabelId = AFI->createPICLabelUId();
  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
  unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);

  const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;

  // Grab constant pool and fixed stack memory operands.
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, 4);

  MachineMemOperand *FIMMOSt =
      MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                               MachineMemOperand::MOStore, 4, 4);

  // Load the address of the dispatch MBB into the jump buffer.
  if (isThumb2) {
    // Incoming value: jbuf
    //   ldr.n  r5, LCPI1_1
    //   orr    r5, r5, #1
    //   add    r5, pc
    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
                       .addConstantPoolIndex(CPI)
                       .addMemOperand(CPMMO));
    // Set the low bit because of thumb mode.
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    AddDefaultCC(
        AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
                           .addReg(NewVReg1, RegState::Kill)
                           .addImm(0x01)));
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
        .addReg(NewVReg2, RegState::Kill)
        .addImm(PCLabelId);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
                       .addReg(NewVReg3, RegState::Kill)
                       .addFrameIndex(FI)
                       .addImm(36) // &jbuf[1] :: pc
                       .addMemOperand(FIMMOSt));
  } else if (isThumb) {
    // Incoming value: jbuf
    //   ldr.n  r1, LCPI1_4
    //   add    r1, pc
    //   mov    r2, #1
    //   orrs   r1, r2
    //   add    r2, $jbuf, #+4 ; &jbuf[1]
    //   str    r1, [r2]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
                       .addConstantPoolIndex(CPI)
                       .addMemOperand(CPMMO));
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId);
    // Set the low bit because of thumb mode.
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
                       .addReg(ARM::CPSR, RegState::Define)
                       .addImm(1));
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
                       .addReg(ARM::CPSR, RegState::Define)
                       .addReg(NewVReg2, RegState::Kill)
                       .addReg(NewVReg3, RegState::Kill));
    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
        .addFrameIndex(FI)
        .addImm(36); // &jbuf[1] :: pc
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
                       .addReg(NewVReg4, RegState::Kill)
                       .addReg(NewVReg5, RegState::Kill)
                       .addImm(0)
                       .addMemOperand(FIMMOSt));
  } else {
    // Incoming value: jbuf
    //   ldr  r1, LCPI1_1
    //   add  r1, pc, r1
    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
                       .addConstantPoolIndex(CPI)
                       .addImm(0)
                       .addMemOperand(CPMMO));
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
                       .addReg(NewVReg1, RegState::Kill)
                       .addImm(PCLabelId));
    AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
                       .addReg(NewVReg2, RegState::Kill)
                       .addFrameIndex(FI)
                       .addImm(36) // &jbuf[1] :: pc
                       .addMemOperand(FIMMOSt));
  }
}

void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
                                              MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineFrameInfo *MFI = MF->getFrameInfo();
  int FI = MFI->getFunctionContextIndex();

  const TargetRegisterClass
*TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass 7298 : &ARM::GPRnopcRegClass; 7299 7300 // Get a mapping of the call site numbers to all of the landing pads they're 7301 // associated with. 7302 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 7303 unsigned MaxCSNum = 0; 7304 MachineModuleInfo &MMI = MF->getMMI(); 7305 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 7306 ++BB) { 7307 if (!BB->isEHPad()) continue; 7308 7309 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 7310 // pad. 7311 for (MachineBasicBlock::iterator 7312 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 7313 if (!II->isEHLabel()) continue; 7314 7315 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 7316 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 7317 7318 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 7319 for (SmallVectorImpl<unsigned>::iterator 7320 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 7321 CSI != CSE; ++CSI) { 7322 CallSiteNumToLPad[*CSI].push_back(&*BB); 7323 MaxCSNum = std::max(MaxCSNum, *CSI); 7324 } 7325 break; 7326 } 7327 } 7328 7329 // Get an ordered list of the machine basic blocks for the jump table. 7330 std::vector<MachineBasicBlock*> LPadList; 7331 SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; 7332 LPadList.reserve(CallSiteNumToLPad.size()); 7333 for (unsigned I = 1; I <= MaxCSNum; ++I) { 7334 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 7335 for (SmallVectorImpl<MachineBasicBlock*>::iterator 7336 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 7337 LPadList.push_back(*II); 7338 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 7339 } 7340 } 7341 7342 assert(!LPadList.empty() && 7343 "No landing pad destinations for the dispatch jump table!"); 7344 7345 // Create the jump table and associated information. 
7346 MachineJumpTableInfo *JTI = 7347 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 7348 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 7349 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 7350 7351 // Create the MBBs for the dispatch code. 7352 7353 // Shove the dispatch's address into the return slot in the function context. 7354 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 7355 DispatchBB->setIsEHPad(); 7356 7357 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 7358 unsigned trap_opcode; 7359 if (Subtarget->isThumb()) 7360 trap_opcode = ARM::tTRAP; 7361 else 7362 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; 7363 7364 BuildMI(TrapBB, dl, TII->get(trap_opcode)); 7365 DispatchBB->addSuccessor(TrapBB); 7366 7367 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 7368 DispatchBB->addSuccessor(DispContBB); 7369 7370 // Insert and MBBs. 7371 MF->insert(MF->end(), DispatchBB); 7372 MF->insert(MF->end(), DispContBB); 7373 MF->insert(MF->end(), TrapBB); 7374 7375 // Insert code into the entry block that creates and registers the function 7376 // context. 7377 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 7378 7379 MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( 7380 MachinePointerInfo::getFixedStack(*MF, FI), 7381 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); 7382 7383 MachineInstrBuilder MIB; 7384 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 7385 7386 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 7387 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 7388 7389 // Add a register mask with no preserved registers. This results in all 7390 // registers being marked as clobbered. 
7391 MIB.addRegMask(RI.getNoPreservedMask()); 7392 7393 unsigned NumLPads = LPadList.size(); 7394 if (Subtarget->isThumb2()) { 7395 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7396 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 7397 .addFrameIndex(FI) 7398 .addImm(4) 7399 .addMemOperand(FIMMOLd)); 7400 7401 if (NumLPads < 256) { 7402 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 7403 .addReg(NewVReg1) 7404 .addImm(LPadList.size())); 7405 } else { 7406 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7407 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 7408 .addImm(NumLPads & 0xFFFF)); 7409 7410 unsigned VReg2 = VReg1; 7411 if ((NumLPads & 0xFFFF0000) != 0) { 7412 VReg2 = MRI->createVirtualRegister(TRC); 7413 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 7414 .addReg(VReg1) 7415 .addImm(NumLPads >> 16)); 7416 } 7417 7418 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 7419 .addReg(NewVReg1) 7420 .addReg(VReg2)); 7421 } 7422 7423 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 7424 .addMBB(TrapBB) 7425 .addImm(ARMCC::HI) 7426 .addReg(ARM::CPSR); 7427 7428 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7429 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 7430 .addJumpTableIndex(MJTI)); 7431 7432 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7433 AddDefaultCC( 7434 AddDefaultPred( 7435 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 7436 .addReg(NewVReg3, RegState::Kill) 7437 .addReg(NewVReg1) 7438 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 7439 7440 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 7441 .addReg(NewVReg4, RegState::Kill) 7442 .addReg(NewVReg1) 7443 .addJumpTableIndex(MJTI); 7444 } else if (Subtarget->isThumb()) { 7445 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7446 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 7447 .addFrameIndex(FI) 7448 
.addImm(1) 7449 .addMemOperand(FIMMOLd)); 7450 7451 if (NumLPads < 256) { 7452 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 7453 .addReg(NewVReg1) 7454 .addImm(NumLPads)); 7455 } else { 7456 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7457 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7458 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7459 7460 // MachineConstantPool wants an explicit alignment. 7461 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 7462 if (Align == 0) 7463 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 7464 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7465 7466 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7467 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 7468 .addReg(VReg1, RegState::Define) 7469 .addConstantPoolIndex(Idx)); 7470 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 7471 .addReg(NewVReg1) 7472 .addReg(VReg1)); 7473 } 7474 7475 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 7476 .addMBB(TrapBB) 7477 .addImm(ARMCC::HI) 7478 .addReg(ARM::CPSR); 7479 7480 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7481 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 7482 .addReg(ARM::CPSR, RegState::Define) 7483 .addReg(NewVReg1) 7484 .addImm(2)); 7485 7486 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7487 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 7488 .addJumpTableIndex(MJTI)); 7489 7490 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7491 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 7492 .addReg(ARM::CPSR, RegState::Define) 7493 .addReg(NewVReg2, RegState::Kill) 7494 .addReg(NewVReg3)); 7495 7496 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( 7497 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); 7498 7499 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7500 
AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 7501 .addReg(NewVReg4, RegState::Kill) 7502 .addImm(0) 7503 .addMemOperand(JTMMOLd)); 7504 7505 unsigned NewVReg6 = NewVReg5; 7506 if (RelocM == Reloc::PIC_) { 7507 NewVReg6 = MRI->createVirtualRegister(TRC); 7508 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 7509 .addReg(ARM::CPSR, RegState::Define) 7510 .addReg(NewVReg5, RegState::Kill) 7511 .addReg(NewVReg3)); 7512 } 7513 7514 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 7515 .addReg(NewVReg6, RegState::Kill) 7516 .addJumpTableIndex(MJTI); 7517 } else { 7518 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7519 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 7520 .addFrameIndex(FI) 7521 .addImm(4) 7522 .addMemOperand(FIMMOLd)); 7523 7524 if (NumLPads < 256) { 7525 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 7526 .addReg(NewVReg1) 7527 .addImm(NumLPads)); 7528 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 7529 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7530 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 7531 .addImm(NumLPads & 0xFFFF)); 7532 7533 unsigned VReg2 = VReg1; 7534 if ((NumLPads & 0xFFFF0000) != 0) { 7535 VReg2 = MRI->createVirtualRegister(TRC); 7536 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 7537 .addReg(VReg1) 7538 .addImm(NumLPads >> 16)); 7539 } 7540 7541 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 7542 .addReg(NewVReg1) 7543 .addReg(VReg2)); 7544 } else { 7545 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7546 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7547 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7548 7549 // MachineConstantPool wants an explicit alignment. 
7550 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 7551 if (Align == 0) 7552 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 7553 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7554 7555 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7556 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 7557 .addReg(VReg1, RegState::Define) 7558 .addConstantPoolIndex(Idx) 7559 .addImm(0)); 7560 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 7561 .addReg(NewVReg1) 7562 .addReg(VReg1, RegState::Kill)); 7563 } 7564 7565 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 7566 .addMBB(TrapBB) 7567 .addImm(ARMCC::HI) 7568 .addReg(ARM::CPSR); 7569 7570 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7571 AddDefaultCC( 7572 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 7573 .addReg(NewVReg1) 7574 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 7575 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7576 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 7577 .addJumpTableIndex(MJTI)); 7578 7579 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( 7580 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); 7581 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7582 AddDefaultPred( 7583 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 7584 .addReg(NewVReg3, RegState::Kill) 7585 .addReg(NewVReg4) 7586 .addImm(0) 7587 .addMemOperand(JTMMOLd)); 7588 7589 if (RelocM == Reloc::PIC_) { 7590 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 7591 .addReg(NewVReg5, RegState::Kill) 7592 .addReg(NewVReg4) 7593 .addJumpTableIndex(MJTI); 7594 } else { 7595 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 7596 .addReg(NewVReg5, RegState::Kill) 7597 .addJumpTableIndex(MJTI); 7598 } 7599 } 7600 7601 // Add the jump table entries as successors to the MBB. 
7602 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 7603 for (std::vector<MachineBasicBlock*>::iterator 7604 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 7605 MachineBasicBlock *CurMBB = *I; 7606 if (SeenMBBs.insert(CurMBB).second) 7607 DispContBB->addSuccessor(CurMBB); 7608 } 7609 7610 // N.B. the order the invoke BBs are processed in doesn't matter here. 7611 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); 7612 SmallVector<MachineBasicBlock*, 64> MBBLPads; 7613 for (MachineBasicBlock *BB : InvokeBBs) { 7614 7615 // Remove the landing pad successor from the invoke block and replace it 7616 // with the new dispatch block. 7617 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 7618 BB->succ_end()); 7619 while (!Successors.empty()) { 7620 MachineBasicBlock *SMBB = Successors.pop_back_val(); 7621 if (SMBB->isEHPad()) { 7622 BB->removeSuccessor(SMBB); 7623 MBBLPads.push_back(SMBB); 7624 } 7625 } 7626 7627 BB->addSuccessor(DispatchBB, BranchProbability::getZero()); 7628 BB->normalizeSuccProbs(); 7629 7630 // Find the invoke call and mark all of the callee-saved registers as 7631 // 'implicit defined' so that they're spilled. This prevents code from 7632 // moving instructions to before the EH block, where they will never be 7633 // executed. 
    // Walk the invoke block backwards to the call instruction.  Every
    // callee-saved register the call does not already reference is added as
    // an implicit-def (dead) operand so it is considered clobbered/spilled
    // across the EH dispatch.
    for (MachineBasicBlock::reverse_iterator
           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
      if (!II->isCall()) continue;

      // Collect every register the call already mentions as an operand.
      DenseMap<unsigned, bool> DefRegs;
      for (MachineInstr::mop_iterator
             OI = II->operands_begin(), OE = II->operands_end();
           OI != OE; ++OI) {
        if (!OI->isReg()) continue;
        DefRegs[OI->getReg()] = true;
      }

      MachineInstrBuilder MIB(*MF, &*II);

      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
        unsigned Reg = SavedRegs[i];
        // Restrict to the GPR classes valid for the current instruction set
        // (Thumb2 / Thumb1-only / ARM); other saved registers are skipped.
        if (Subtarget->isThumb2() &&
            !ARM::tGPRRegClass.contains(Reg) &&
            !ARM::hGPRRegClass.contains(Reg))
          continue;
        if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
          continue;
        if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
          continue;
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      // Only the last call in the block (first found by the reverse walk)
      // is annotated.
      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (SmallVectorImpl<MachineBasicBlock*>::iterator
         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
    (*I)->setIsEHPad(false);

  // The instruction is gone now.
  MI->eraseFromParent();
}

/// Return the one successor of MBB that is not Succ.  MBB is expected to
/// have exactly two successors; reaching the end of the list is fatal.
static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}

/// Return the load opcode for a given load size. If load size >= 8,
/// neon opcode will be returned.  Returns 0 for an unsupported size.
/// Note that the Thumb1 opcodes are plain immediate-offset loads (no
/// writeback); emitPostLd compensates with a separate address add.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
  if (LdSize >= 8)
    return LdSize == 16 ? ARM::VLD1q32wb_fixed
                        : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
  if (IsThumb1)
    return LdSize == 4 ? ARM::tLDRi
                       : LdSize == 2 ? ARM::tLDRHi
                                     : LdSize == 1 ? ARM::tLDRBi : 0;
  if (IsThumb2)
    return LdSize == 4 ? ARM::t2LDR_POST
                       : LdSize == 2 ? ARM::t2LDRH_POST
                                     : LdSize == 1 ? ARM::t2LDRB_POST : 0;
  return LdSize == 4 ? ARM::LDR_POST_IMM
                     : LdSize == 2 ? ARM::LDRH_POST
                                   : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
}

/// Return the store opcode for a given store size. If store size >= 8,
/// neon opcode will be returned.  Returns 0 for an unsupported size.
/// Mirrors getLdOpcode: Thumb1 opcodes have no writeback form.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
  if (StSize >= 8)
    return StSize == 16 ? ARM::VST1q32wb_fixed
                        : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
  if (IsThumb1)
    return StSize == 4 ? ARM::tSTRi
                       : StSize == 2 ? ARM::tSTRHi
                                     : StSize == 1 ? ARM::tSTRBi : 0;
  if (IsThumb2)
    return StSize == 4 ? ARM::t2STR_POST
                       : StSize == 2 ? ARM::t2STRH_POST
                                     : StSize == 1 ? ARM::t2STRB_POST : 0;
  return StSize == 4 ? ARM::STR_POST_IMM
                     : StSize == 2 ? ARM::STRH_POST
                                   : StSize == 1 ? ARM::STRB_POST_IMM : 0;
}

/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.  Data receives the loaded value, AddrIn is
/// the address to load from, and AddrOut is defined to AddrIn + LdSize.
static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
                       const TargetInstrInfo *TII, DebugLoc dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
  assert(LdOpc != 0 && "Should have a load opcode");
  if (LdSize >= 8) {
    // NEON VLD1 with fixed post-increment writeback.
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                       .addReg(AddrOut, RegState::Define).addReg(AddrIn)
                       .addImm(0));
  } else if (IsThumb1) {
    // Thumb1 has no post-increment loads: load + update AddrIn separately.
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                       .addReg(AddrIn).addImm(0));
    MachineInstrBuilder MIB =
        BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
    MIB = AddDefaultT1CC(MIB);
    MIB.addReg(AddrIn).addImm(LdSize);
    AddDefaultPred(MIB);
  } else if (IsThumb2) {
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                       .addReg(AddrOut, RegState::Define).addReg(AddrIn)
                       .addImm(LdSize));
  } else { // arm
    // ARM post-indexed form carries an extra (unused) offset register.
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                       .addReg(AddrOut, RegState::Define).addReg(AddrIn)
                       .addReg(0).addImm(LdSize));
  }
}

/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.  Data is stored to AddrIn, and AddrOut is
/// defined to AddrIn + StSize.
static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
                       const TargetInstrInfo *TII, DebugLoc dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
  assert(StOpc != 0 && "Should have a store opcode");
  if (StSize >= 8) {
    // NEON VST1 with fixed post-increment writeback.
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
                       .addReg(AddrIn).addImm(0).addReg(Data));
  } else if (IsThumb1) {
    // Thumb1 has no post-increment stores: store + update AddrIn separately.
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
                       .addReg(AddrIn).addImm(0));
    MachineInstrBuilder MIB =
        BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
    MIB = AddDefaultT1CC(MIB);
    MIB.addReg(AddrIn).addImm(StSize);
    AddDefaultPred(MIB);
  } else if (IsThumb2) {
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
                       .addReg(Data).addReg(AddrIn).addImm(StSize));
  } else { // arm
    // ARM post-indexed form carries an extra (unused) offset register.
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
                       .addReg(Data).addReg(AddrIn).addReg(0)
                       .addImm(StSize));
  }
}

/// Expand the COPY_STRUCT_BYVAL pseudo into either an unrolled sequence of
/// unit-size copies or a copy loop, depending on the total size.
MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr *MI,
                                   MachineBasicBlock *BB) const {
  // This pseudo instruction has 3 operands: dst, src, size
  // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
  // Otherwise, we will generate unrolled scalar copies.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI->getOperand(0).getReg();
  unsigned src = MI->getOperand(1).getReg();
  unsigned SizeVal = MI->getOperand(2).getImm();
  unsigned Align = MI->getOperand(3).getImm();
  DebugLoc dl = MI->getDebugLoc();

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnitSize = 0;                         // bytes copied per load/store
  const TargetRegisterClass *TRC = nullptr;      // scalar register class
  const TargetRegisterClass *VecTRC = nullptr;   // NEON register class

  bool IsThumb1 = Subtarget->isThumb1Only();
  bool IsThumb2 = Subtarget->isThumb2();

  // Pick the widest unit size the alignment permits.
  if (Align & 1) {
    UnitSize = 1;
  } else if (Align & 2) {
    UnitSize = 2;
  } else {
    // Check whether we can use NEON instructions.
    if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
        Subtarget->hasNEON()) {
      if ((Align % 16 == 0) && SizeVal >= 16)
        UnitSize = 16;
      else if ((Align % 8 == 0) && SizeVal >= 8)
        UnitSize = 8;
    }
    // Can't use NEON instructions.
    if (UnitSize == 0)
      UnitSize = 4;
  }

  // Select the correct opcode and register class for unit size load/store
  bool IsNeon = UnitSize >= 8;
  TRC = (IsThumb1 || IsThumb2) ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
  if (IsNeon)
    VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
                            : UnitSize == 8 ? &ARM::DPRRegClass
                                            : nullptr;

  unsigned BytesLeft = SizeVal % UnitSize;   // tail copied byte-by-byte
  unsigned LoopSize = SizeVal - BytesLeft;   // portion copied in UnitSize steps

  if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
    // Small copy: fully unroll.  Each iteration threads the post-incremented
    // addresses (srcOut/destOut) into the next one.
    // Use LDR and STR to copy.
    // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
    // [destOut] = STR_POST(scratch, destIn, UnitSize)
    unsigned srcIn = src;
    unsigned destIn = dest;
    for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
      unsigned srcOut = MRI.createVirtualRegister(TRC);
      unsigned destOut = MRI.createVirtualRegister(TRC);
      unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
      emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }

    // Handle the leftover bytes with LDRB and STRB.
    // [scratch, srcOut] = LDRB_POST(srcIn, 1)
    // [destOut] = STRB_POST(scratch, destIn, 1)
    for (unsigned i = 0; i < BytesLeft; i++) {
      unsigned srcOut = MRI.createVirtualRegister(TRC);
      unsigned destOut = MRI.createVirtualRegister(TRC);
      unsigned scratch = MRI.createVirtualRegister(TRC);
      emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }
    MI->eraseFromParent();   // The instruction is gone now.
    return BB;
  }

  // Expand the pseudo op to a loop.
  // thisMBB:
  //   ...
  //   movw varEnd, # --> with thumb2
  //   movt varEnd, #
  //   ldrcp varEnd, idx --> without thumb2
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate to varEnd.
  unsigned varEnd = MRI.createVirtualRegister(TRC);
  if (Subtarget->useMovt(*MF)) {
    // Materialize LoopSize with movw (+ movt when the value needs the upper
    // half).  Vtmp holds the partial value if a movt follows.
    unsigned Vtmp = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(BB, dl,
                           TII->get(IsThumb2 ? ARM::t2MOVi16 : ARM::MOVi16),
                           Vtmp).addImm(LoopSize & 0xFFFF));

    if ((LoopSize & 0xFFFF0000) != 0)
      AddDefaultPred(BuildMI(BB, dl,
                             TII->get(IsThumb2 ? ARM::t2MOVTi16 : ARM::MOVTi16),
                             varEnd)
                         .addReg(Vtmp)
                         .addImm(LoopSize >> 16));
  } else {
    // No movw/movt available: load LoopSize from the constant pool.
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
    if (Align == 0)
      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

    if (IsThumb1)
      AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
          varEnd, RegState::Define).addConstantPoolIndex(Idx));
    else
      AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
          varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  unsigned varLoop = MRI.createVirtualRegister(TRC);
  unsigned varPhi = MRI.createVirtualRegister(TRC);
  unsigned srcLoop = MRI.createVirtualRegister(TRC);
  unsigned srcPhi = MRI.createVirtualRegister(TRC);
  unsigned destLoop = MRI.createVirtualRegister(TRC);
  unsigned destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
    .addReg(varLoop).addMBB(loopMBB)
    .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
    .addReg(srcLoop).addMBB(loopMBB)
    .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
    .addReg(destLoop).addMBB(loopMBB)
    .addReg(dest).addMBB(entryBB);

  // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    // Thumb1: tSUBi8 with the default T1 CC-out, which sets the flags for
    // the conditional branch below.
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
    MIB = AddDefaultT1CC(MIB);
    MIB.addReg(varPhi).addImm(UnitSize);
    AddDefaultPred(MIB);
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl,
                TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
    // Operand 5 is the optional cc_out added by AddDefaultCC; rewrite it to
    // define CPSR so this SUB sets the flags tested by the bne below.
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  // NOTE: relies on the implicit MachineBasicBlock::iterator ->
  // MachineInstr* conversion to anchor the epilogue insertion point.
  MachineInstr *StartOfExit = exitMBB->begin();

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    unsigned srcOut = MRI.createVirtualRegister(TRC);
    unsigned destOut = MRI.createVirtualRegister(TRC);
    unsigned scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI->eraseFromParent();   // The instruction is gone now.
  return BB;
}

/// Expand the Windows __chkstk pseudo: call __chkstk and subtract the
/// returned byte count from SP.
MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4.  This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it.  Windows on ARM is a pure
  // thumb-2 environment, so there is no interworking required.  As a result,
  // we do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.

  switch (TM.getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Default:
  case CodeModel::Kernel:
    // Direct BL: R4 is both the input (word count) and output (byte
    // adjustment); R12/IP is marked clobbered-but-dead defensively.
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
      .addImm((unsigned)ARMCC::AL).addReg(0)
      .addExternalSymbol("__chkstk")
      .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
      .addReg(ARM::R4, RegState::Implicit | RegState::Define)
      .addReg(ARM::R12,
              RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  case CodeModel::Large:
  case CodeModel::JITDefault: {
    // Long call: materialize the address and BLX through a register.
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);

    BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
      .addExternalSymbol("__chkstk");
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
      .addImm((unsigned)ARMCC::AL).addReg(0)
      .addReg(Reg, RegState::Kill)
      .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
      .addReg(ARM::R4, RegState::Implicit | RegState::Define)
      .addReg(ARM::R12,
              RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  }
  }

  // SP -= R4 (the byte adjustment computed by __chkstk).
  AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr),
                                      ARM::SP)
                              .addReg(ARM::SP, RegState::Kill)
                              .addReg(ARM::R4, RegState::Kill)
                              .setMIFlags(MachineInstr::FrameSetup)));

  MI->eraseFromParent();
  return MBB;
}

/// Expand the WIN__DBZCHK pseudo: trap (udf #249) when the divisor operand
/// is zero, otherwise continue in ContBB.
MachineBasicBlock *
ARMTargetLowering::EmitLowered__dbzchk(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  // Split everything after MI into ContBB, which inherits MBB's successors.
  MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
  MF->insert(++MBB->getIterator(), ContBB);
  ContBB->splice(ContBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  ContBB->transferSuccessorsAndUpdatePHIs(MBB);

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  MF->push_back(TrapBB);
  // udf #249 is the Windows integer-divide-by-zero trap.
  BuildMI(TrapBB, DL, TII->get(ARM::t2UDF)).addImm(249);
  MBB->addSuccessor(TrapBB);

  // cbz <reg>, TrapBB ; b ContBB
  BuildMI(*MBB, MI, DL, TII->get(ARM::tCBZ))
      .addReg(MI->getOperand(0).getReg())
      .addMBB(TrapBB);
  AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::t2B)).addMBB(ContBB));
  MBB->addSuccessor(ContBB);

  MI->eraseFromParent();
  return ContBB;
}

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();
  switch (MI->getOpcode()) {
  default: {
    MI->dump();
    llvm_unreachable("Unexpected instr type to insert");
  }
  // The Thumb2 pre-indexed stores have the same MI operands, they just
  // define them differently in the .td files from the isel patterns, so
  // they need pseudos.
  case ARM::t2STR_preidx:
    MI->setDesc(TII->get(ARM::t2STR_PRE));
    return BB;
  case ARM::t2STRB_preidx:
    MI->setDesc(TII->get(ARM::t2STRB_PRE));
    return BB;
  case ARM::t2STRH_preidx:
    MI->setDesc(TII->get(ARM::t2STRH_PRE));
    return BB;

  case ARM::STRi_preidx:
  case ARM::STRBi_preidx: {
    unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
      ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
    // Decode the offset.
8144 unsigned Offset = MI->getOperand(4).getImm(); 8145 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 8146 Offset = ARM_AM::getAM2Offset(Offset); 8147 if (isSub) 8148 Offset = -Offset; 8149 8150 MachineMemOperand *MMO = *MI->memoperands_begin(); 8151 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 8152 .addOperand(MI->getOperand(0)) // Rn_wb 8153 .addOperand(MI->getOperand(1)) // Rt 8154 .addOperand(MI->getOperand(2)) // Rn 8155 .addImm(Offset) // offset (skip GPR==zero_reg) 8156 .addOperand(MI->getOperand(5)) // pred 8157 .addOperand(MI->getOperand(6)) 8158 .addMemOperand(MMO); 8159 MI->eraseFromParent(); 8160 return BB; 8161 } 8162 case ARM::STRr_preidx: 8163 case ARM::STRBr_preidx: 8164 case ARM::STRH_preidx: { 8165 unsigned NewOpc; 8166 switch (MI->getOpcode()) { 8167 default: llvm_unreachable("unexpected opcode!"); 8168 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 8169 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 8170 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 8171 } 8172 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 8173 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 8174 MIB.addOperand(MI->getOperand(i)); 8175 MI->eraseFromParent(); 8176 return BB; 8177 } 8178 8179 case ARM::tMOVCCr_pseudo: { 8180 // To "insert" a SELECT_CC instruction, we actually have to insert the 8181 // diamond control-flow pattern. The incoming instruction knows the 8182 // destination vreg to set, the condition code register to branch on, the 8183 // true/false values to select between, and a branch opcode to use. 8184 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8185 MachineFunction::iterator It = ++BB->getIterator(); 8186 8187 // thisMBB: 8188 // ... 8189 // TrueVal = ... 
8190 // cmpTY ccX, r1, r2 8191 // bCC copy1MBB 8192 // fallthrough --> copy0MBB 8193 MachineBasicBlock *thisMBB = BB; 8194 MachineFunction *F = BB->getParent(); 8195 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8196 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8197 F->insert(It, copy0MBB); 8198 F->insert(It, sinkMBB); 8199 8200 // Transfer the remainder of BB and its successor edges to sinkMBB. 8201 sinkMBB->splice(sinkMBB->begin(), BB, 8202 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8203 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8204 8205 BB->addSuccessor(copy0MBB); 8206 BB->addSuccessor(sinkMBB); 8207 8208 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 8209 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 8210 8211 // copy0MBB: 8212 // %FalseValue = ... 8213 // # fallthrough to sinkMBB 8214 BB = copy0MBB; 8215 8216 // Update machine-CFG edges 8217 BB->addSuccessor(sinkMBB); 8218 8219 // sinkMBB: 8220 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8221 // ... 8222 BB = sinkMBB; 8223 BuildMI(*BB, BB->begin(), dl, 8224 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 8225 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 8226 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 8227 8228 MI->eraseFromParent(); // The pseudo instruction is gone now. 8229 return BB; 8230 } 8231 8232 case ARM::BCCi64: 8233 case ARM::BCCZi64: { 8234 // If there is an unconditional branch to the other successor, remove it. 8235 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8236 8237 // Compare both parts that make up the double comparison separately for 8238 // equality. 8239 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 8240 8241 unsigned LHS1 = MI->getOperand(1).getReg(); 8242 unsigned LHS2 = MI->getOperand(2).getReg(); 8243 if (RHSisZero) { 8244 AddDefaultPred(BuildMI(BB, dl, 8245 TII->get(isThumb2 ? 
ARM::t2CMPri : ARM::CMPri)) 8246 .addReg(LHS1).addImm(0)); 8247 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8248 .addReg(LHS2).addImm(0) 8249 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 8250 } else { 8251 unsigned RHS1 = MI->getOperand(3).getReg(); 8252 unsigned RHS2 = MI->getOperand(4).getReg(); 8253 AddDefaultPred(BuildMI(BB, dl, 8254 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 8255 .addReg(LHS1).addReg(RHS1)); 8256 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 8257 .addReg(LHS2).addReg(RHS2) 8258 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 8259 } 8260 8261 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 8262 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 8263 if (MI->getOperand(0).getImm() == ARMCC::NE) 8264 std::swap(destMBB, exitMBB); 8265 8266 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 8267 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 8268 if (isThumb2) 8269 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 8270 else 8271 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 8272 8273 MI->eraseFromParent(); // The pseudo instruction is gone now. 8274 return BB; 8275 } 8276 8277 case ARM::Int_eh_sjlj_setjmp: 8278 case ARM::Int_eh_sjlj_setjmp_nofp: 8279 case ARM::tInt_eh_sjlj_setjmp: 8280 case ARM::t2Int_eh_sjlj_setjmp: 8281 case ARM::t2Int_eh_sjlj_setjmp_nofp: 8282 return BB; 8283 8284 case ARM::Int_eh_sjlj_setup_dispatch: 8285 EmitSjLjDispatchBlock(MI, BB); 8286 return BB; 8287 8288 case ARM::ABS: 8289 case ARM::t2ABS: { 8290 // To insert an ABS instruction, we have to insert the 8291 // diamond control-flow pattern. The incoming instruction knows the 8292 // source vreg to test against 0, the destination vreg to set, 8293 // the condition code register to branch on, the 8294 // true/false values to select between, and a branch opcode to use. 
8295 // It transforms 8296 // V1 = ABS V0 8297 // into 8298 // V2 = MOVS V0 8299 // BCC (branch to SinkBB if V0 >= 0) 8300 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 8301 // SinkBB: V1 = PHI(V2, V3) 8302 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8303 MachineFunction::iterator BBI = ++BB->getIterator(); 8304 MachineFunction *Fn = BB->getParent(); 8305 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 8306 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 8307 Fn->insert(BBI, RSBBB); 8308 Fn->insert(BBI, SinkBB); 8309 8310 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 8311 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 8312 bool ABSSrcKIll = MI->getOperand(1).isKill(); 8313 bool isThumb2 = Subtarget->isThumb2(); 8314 MachineRegisterInfo &MRI = Fn->getRegInfo(); 8315 // In Thumb mode S must not be specified if source register is the SP or 8316 // PC and if destination register is the SP, so restrict register class 8317 unsigned NewRsbDstReg = 8318 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); 8319 8320 // Transfer the remainder of BB and its successor edges to sinkMBB. 8321 SinkBB->splice(SinkBB->begin(), BB, 8322 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8323 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 8324 8325 BB->addSuccessor(RSBBB); 8326 BB->addSuccessor(SinkBB); 8327 8328 // fall through to SinkMBB 8329 RSBBB->addSuccessor(SinkBB); 8330 8331 // insert a cmp at the end of BB 8332 AddDefaultPred(BuildMI(BB, dl, 8333 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8334 .addReg(ABSSrcReg).addImm(0)); 8335 8336 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 8337 BuildMI(BB, dl, 8338 TII->get(isThumb2 ? 
                             ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // insert rsbri in RSBBB
    // Note: BCC and rsbri will be converted into predicated rsbmi
    // by if-conversion pass
    BuildMI(*RSBBB, RSBBB->begin(), dl,
            TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
      .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
      .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);

    // insert PHI in SinkBB,
    // reuse ABSDstReg to not change uses of ABS instruction
    BuildMI(*SinkBB, SinkBB->begin(), dl,
            TII->get(ARM::PHI), ABSDstReg)
      .addReg(NewRsbDstReg).addMBB(RSBBB)
      .addReg(ABSSrcReg).addMBB(BB);

    // remove ABS instruction
    MI->eraseFromParent();

    // return last added BB
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  case ARM::WIN__CHKSTK:
    return EmitLowered__chkstk(MI, BB);
  case ARM::WIN__DBZCHK:
    return EmitLowered__dbzchk(MI, BB);
  }
}

/// \brief Attaches vregs to MEMCPY that it will use as scratch registers
/// when it is expanded into LDM/STM. This is done as a post-isel lowering
/// instead of as a custom inserter because we need the use list from the SDNode.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr *MI, const SDNode *Node) {
  // Thumb1 scratch registers are restricted to the tGPR class; everything
  // else can use the full GPR class.
  bool isThumb1 = Subtarget->isThumb1Only();

  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);

  // If the new dst/src is unused mark it as dead.
  if (!Node->hasAnyUseOfValue(0)) {
    MI->getOperand(0).setIsDead(true);
  }
  if (!Node->hasAnyUseOfValue(1)) {
    MI->getOperand(1).setIsDead(true);
  }

  // The MEMCPY both defines and kills the scratch registers.
  // Operand 4 carries the number of scratch registers to attach.
  for (unsigned I = 0; I != MI->getOperand(4).getImm(); ++I) {
    unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
                                                         : &ARM::GPRRegClass);
    MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
  }
}

// Post-isel fixup hook: attaches MEMCPY scratch registers and activates the
// optional cc_out operand on flag-setting instructions (body continues below).
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                      SDNode *Node) const {
  // MEMCPY needs its LDM/STM scratch vregs attached here, since the SDNode
  // use list is required (see attachMEMCPYScratchRegs above).
  if (MI->getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI->getDesc();
  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
  // operand is still set to noreg. If needed, set the optional operand's
  // register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
           "converted opcode should be the same except for cc_out");

    MI->setDesc(*MCID);

    // Add the optional cc_out operand
    MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
  }
  unsigned ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  // (Tail of AdjustInstrPostInstrSelection.) If the opcode has no optional
  // cc_out at all, there is nothing to adjust.
  if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      // Removing invalidates operand indices past i, but we break right away.
      MI->RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    // CPSR result is unused: leave the optional cc_out as noreg.
    assert(!MI->getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI->getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    // True operand is the identity constant: N is that constant when CC holds.
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    // False operand is the identity constant: N matches when CC is false.
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    // Fall through.
  case ISD::SIGN_EXTEND: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    if (CC.getValueType() != MVT::i1)
      return false;
    // The extension yields the constant (0) when cc is false.
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, dl, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, dl, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
                                VT);
    return true;
  }
  }
}

// Combine a constant select operand into its use:
//
// (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
// (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
// (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
// (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
// (add (zext cc), x) -> (select cc (add x, 1), x)
// (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  // (Tail of combineSelectAndUse.) When CC is true N reduces to x itself;
  // otherwise apply the operation to x and the non-identity constant.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
// Only single-use operands are folded, so the select is not duplicated.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
      return Result;
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
      return Result;
  return SDValue();
}

// AddCombineToVPADDL - For pair-wise add on neon, use the vpaddl instruction
// (only after legalization).
static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {

  // Only perform optimization if after legalize, and if NEON is available. We
  // also expected both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
  // operands, where N is the size of the formed vector.
  // Each EXTRACT_VECTOR should have the same input vector and odd or even
  // index such that we have a pair wise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operands to the ADD which are BUILD_VECTORs,
  // check to see if each of their operands are an EXTRACT_VECTOR with
  // the same vector and appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // First operand is the vector, verify it's the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // Second is the constant, verify it's correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the constant, we want to see all the even or all the odd:
      // N0's lane must pull the even element, N1's lane the following odd one.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment index.
      nextIndex+=2;
    } else
      return SDValue();
  }

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc dl(N);

  // Build operand list: the intrinsic ID is the first operand of the
  // INTRINSIC_WO_CHAIN node.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  // vpaddl widens each lane: i8 -> i16, i16 -> i32, i32 -> i64.
  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  // Emit the intrinsic, then extend/truncate back to the original result type.
  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, dl, VT, tmp);
}

// Return V if it is a U/SMUL_LOHI node, otherwise a null SDValue.
static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}

// Try to fold a U/SMUL_LOHI feeding an ADDC/ADDE pair into a single
// S/UMLAL node (64-bit multiply-accumulate).
static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {

  // Thumb1 has no UMLAL/SMLAL.
  if (Subtarget->isThumb1Only()) return SDValue();

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  // Look for multiply add opportunities.
  // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
  // each add node consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //                  UMUL_LOHI
  //                 / :lo    \ :hi
  //                /          \          [no multiline comment]
  //  loAdd ->  ADDC            |
  //                 \ :glue   /
  //                  \       /
  //                   ADDE <- hiAdd
  //
  assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
  SDValue AddcOp0 = AddcNode->getOperand(0);
  SDValue AddcOp1 = AddcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcOp0.getNode() == AddcOp1.getNode())
    return SDValue();

  assert(AddcNode->getNumValues() == 2 &&
         AddcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that we have a glued ADDC node.
  if (AddcNode->getValueType(1) != MVT::Glue)
    return SDValue();

  // Check that the ADDC adds the low result of the S/UMUL_LOHI.
  if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcOp1->getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  // Look for the glued ADDE.
  SDNode* AddeNode = AddcNode->getGluedUser();
  if (!AddeNode)
    return SDValue();

  // Make sure it is really an ADDE.
  if (AddeNode->getOpcode() != ISD::ADDE)
    return SDValue();

  assert(AddeNode->getNumOperands() == 3 &&
         AddeNode->getOperand(2).getValueType() == MVT::Glue &&
         "ADDE node has the wrong inputs");

  // Check for the triangle shape.
  SDValue AddeOp0 = AddeNode->getOperand(0);
  SDValue AddeOp1 = AddeNode->getOperand(1);

  // Make sure that the ADDE operands are not coming from the same node.
  if (AddeOp0.getNode() == AddeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue* HiAdd = nullptr;
  SDValue* LoMul = nullptr;
  SDValue* LowAdd = nullptr;

  // Ensure that ADDE is from high result of ISD::SMUL_LOHI.
  if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1)))
    return SDValue();

  // The non-MUL operand of the ADDE is the high addend.
  if (IsLeftOperandMUL)
    HiAdd = &AddeOp1;
  else
    HiAdd = &AddeOp0;


  // Ensure that LoMul and LowAdd are taken from correct ISD::SMUL_LOHI node
  // whose low result is fed to the ADDC we are checking.

  if (AddcOp0 == MULOp.getValue(0)) {
    LoMul = &AddcOp0;
    LowAdd = &AddcOp1;
  }
  if (AddcOp1 == MULOp.getValue(0)) {
    LoMul = &AddcOp1;
    LowAdd = &AddcOp0;
  }

  // Bail out when the ADDC does not consume the low half of this MUL_LOHI.
  if (!LoMul)
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Build operand list: multiplicands, then low and high addends.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));
  Ops.push_back(*LowAdd);
  Ops.push_back(*HiAdd);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}

/// PerformADDCCombine - Target-specific dag combine transform from
/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL.
static SDValue PerformADDCCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {

  return AddCombineTo64bitMLAL(N, DCI, Subtarget);

}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget){

  // Attempt to create vpaddl for this add.
  if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget))
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  // Only fold when the select has a single use, so it is not duplicated.
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
      return Result;
  return SDValue();
}

/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // First try with the default operand order.
  if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
///
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  // Only the subtrahend may be a select here; SUB is not commutative.
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
      return Result;

  return SDValue();
}

/// PerformVMULCombine
/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
/// special multiplier accumulator forwarding.
///   vmul d3, d0, d2
///   vmla d3, d1, d2
/// is faster than
///   vadd d3, d0, d1
///   vmul d3, d3, d2
//  However, for (A + B) * (A + B),
//    vadd d2, d0, d1
//    vmul d3, d0, d2
//    vmla d3, d1, d2
//  is slower than
//    vadd d2, d0, d1
//    vmul d3, d2, d2
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Canonicalize the add/sub operand into N0.
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  // Skip the (A + B) * (A + B) case described above.
  if (N0 == N1)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}

/// PerformMULCombine - Replace multiply-by-constant with cheaper
/// shift/add/sub sequences where profitable.
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  int64_t MulAmt = C->getSExtValue();
  // Factor out any power-of-two component; it becomes a final left shift.
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1), DL,
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
                                                    MVT::i32)));
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, DL, MVT::i32), Res);

    } else
      return SDValue();
  }

  // Re-apply the power-of-two factor stripped off above.
  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}

/// PerformANDCombine - Target-specific dag combine xforms for ISD::AND.
static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {

  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VbicVT;
      // VBIC clears the bits set in its immediate, hence ~SplatBits.
      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VbicVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;
  }

  return SDValue();
}

/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9083 return SDValue(); 9084 9085 APInt SplatBits, SplatUndef; 9086 unsigned SplatBitSize; 9087 bool HasAnyUndefs; 9088 if (BVN && Subtarget->hasNEON() && 9089 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 9090 if (SplatBitSize <= 64) { 9091 EVT VorrVT; 9092 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 9093 SplatUndef.getZExtValue(), SplatBitSize, 9094 DAG, dl, VorrVT, VT.is128BitVector(), 9095 OtherModImm); 9096 if (Val.getNode()) { 9097 SDValue Input = 9098 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 9099 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 9100 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 9101 } 9102 } 9103 } 9104 9105 if (!Subtarget->isThumb1Only()) { 9106 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 9107 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 9108 return Result; 9109 } 9110 9111 // The code below optimizes (or (and X, Y), Z). 9112 // The AND operand needs to have a single user to make these optimizations 9113 // profitable. 9114 SDValue N0 = N->getOperand(0); 9115 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 9116 return SDValue(); 9117 SDValue N1 = N->getOperand(1); 9118 9119 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 
9120 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 9121 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 9122 APInt SplatUndef; 9123 unsigned SplatBitSize; 9124 bool HasAnyUndefs; 9125 9126 APInt SplatBits0, SplatBits1; 9127 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 9128 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 9129 // Ensure that the second operand of both ands are constants 9130 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 9131 HasAnyUndefs) && !HasAnyUndefs) { 9132 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 9133 HasAnyUndefs) && !HasAnyUndefs) { 9134 // Ensure that the bit width of the constants are the same and that 9135 // the splat arguments are logical inverses as per the pattern we 9136 // are trying to simplify. 9137 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && 9138 SplatBits0 == ~SplatBits1) { 9139 // Canonicalize the vector type to make instruction selection 9140 // simpler. 9141 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 9142 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 9143 N0->getOperand(1), 9144 N0->getOperand(0), 9145 N1->getOperand(0)); 9146 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9147 } 9148 } 9149 } 9150 } 9151 9152 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 9153 // reasonable. 
9154 9155 // BFI is only available on V6T2+ 9156 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 9157 return SDValue(); 9158 9159 SDLoc DL(N); 9160 // 1) or (and A, mask), val => ARMbfi A, val, mask 9161 // iff (val & mask) == val 9162 // 9163 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 9164 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 9165 // && mask == ~mask2 9166 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 9167 // && ~mask == mask2 9168 // (i.e., copy a bitfield value into another bitfield of the same width) 9169 9170 if (VT != MVT::i32) 9171 return SDValue(); 9172 9173 SDValue N00 = N0.getOperand(0); 9174 9175 // The value and the mask need to be constants so we can verify this is 9176 // actually a bitfield set. If the mask is 0xffff, we can do better 9177 // via a movt instruction, so don't use BFI in that case. 9178 SDValue MaskOp = N0.getOperand(1); 9179 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 9180 if (!MaskC) 9181 return SDValue(); 9182 unsigned Mask = MaskC->getZExtValue(); 9183 if (Mask == 0xffff) 9184 return SDValue(); 9185 SDValue Res; 9186 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 9187 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 9188 if (N1C) { 9189 unsigned Val = N1C->getZExtValue(); 9190 if ((Val & ~Mask) != Val) 9191 return SDValue(); 9192 9193 if (ARM::isBitFieldInvertedMask(Mask)) { 9194 Val >>= countTrailingZeros(~Mask); 9195 9196 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 9197 DAG.getConstant(Val, DL, MVT::i32), 9198 DAG.getConstant(Mask, DL, MVT::i32)); 9199 9200 // Do not add new nodes to DAG combiner worklist. 
9201 DCI.CombineTo(N, Res, false); 9202 return SDValue(); 9203 } 9204 } else if (N1.getOpcode() == ISD::AND) { 9205 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 9206 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 9207 if (!N11C) 9208 return SDValue(); 9209 unsigned Mask2 = N11C->getZExtValue(); 9210 9211 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 9212 // as is to match. 9213 if (ARM::isBitFieldInvertedMask(Mask) && 9214 (Mask == ~Mask2)) { 9215 // The pack halfword instruction works better for masks that fit it, 9216 // so use that when it's available. 9217 if (Subtarget->hasT2ExtractPack() && 9218 (Mask == 0xffff || Mask == 0xffff0000)) 9219 return SDValue(); 9220 // 2a 9221 unsigned amt = countTrailingZeros(Mask2); 9222 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 9223 DAG.getConstant(amt, DL, MVT::i32)); 9224 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 9225 DAG.getConstant(Mask, DL, MVT::i32)); 9226 // Do not add new nodes to DAG combiner worklist. 9227 DCI.CombineTo(N, Res, false); 9228 return SDValue(); 9229 } else if (ARM::isBitFieldInvertedMask(~Mask) && 9230 (~Mask == Mask2)) { 9231 // The pack halfword instruction works better for masks that fit it, 9232 // so use that when it's available. 9233 if (Subtarget->hasT2ExtractPack() && 9234 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 9235 return SDValue(); 9236 // 2b 9237 unsigned lsb = countTrailingZeros(Mask); 9238 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 9239 DAG.getConstant(lsb, DL, MVT::i32)); 9240 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 9241 DAG.getConstant(Mask2, DL, MVT::i32)); 9242 // Do not add new nodes to DAG combiner worklist. 
      // Do not add new nodes to DAG combiner worklist; CombineTo with
      // AddTo=false replaces N without re-queuing the replacement.
      DCI.CombineTo(N, Res, false);
      return SDValue();
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    // The shift amount must line up with the low bit of the mask, so that
    // the shifted-in value lands exactly in the inserted bitfield.
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, DL, MVT::i32));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, Res, false);
  }

  return SDValue();
}

/// PerformXORCombine - Target-specific dag combine xforms for ISD::XOR.
static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  // Only combine on types the target can select directly.
  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
  }

  return SDValue();
}

// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
// and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
// their position in "to" (Rd).
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
  assert(N->getOpcode() == ARMISD::BFI);

  SDValue From = N->getOperand(1);
  // Operand 2 of a BFI holds the *inverted* insertion mask; invert it back to
  // get the destination bits that this BFI writes.
  ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
  // The source bits are the low popcount(ToMask) bits of "from".
  FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());

  // If the Base came from a SHR #C, we can deduce that it is really testing bit
  // #C in the base of the SHR.
  if (From->getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(From->getOperand(1))) {
    APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
    FromMask <<= Shift.getLimitedValue(31);
    From = From->getOperand(0);
  }

  return From;
}

// If A and B contain one contiguous set of bits, does A | B == A . B?
//
// Neither A nor B must be zero.
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
  unsigned LastActiveBitInA = A.countTrailingZeros();
  unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
  // True when B's highest set bit sits directly below A's lowest set bit.
  return LastActiveBitInA - 1 == FirstActiveBitInB;
}

/// FindBFIToCombineWith - Walk the chain of BFIs feeding N's base operand and
/// return a BFI whose written bits concatenate with N's (same "from" base,
/// adjacent bit ranges), or SDValue() if none can be safely combined.
static SDValue FindBFIToCombineWith(SDNode *N) {
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with,
  // if one exists.
  APInt ToMask, FromMask;
  SDValue From = ParseBFI(N, ToMask, FromMask);
  SDValue To = N->getOperand(0);

  // Now check for a compatible BFI to merge with. We can pass through BFIs that
  // aren't compatible, but not if they set the same bit in their destination as
  // we do (or that of any BFI we're going to combine with).
  SDValue V = To;
  APInt CombinedToMask = ToMask;
  while (V.getOpcode() == ARMISD::BFI) {
    APInt NewToMask, NewFromMask;
    SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
    if (NewFrom != From) {
      // This BFI has a different base. Keep going.
      CombinedToMask |= NewToMask;
      V = V.getOperand(0);
      continue;
    }

    // Do the written bits conflict with any we've seen so far?
    if ((NewToMask & CombinedToMask).getBoolValue())
      // Conflicting bits - bail out because going further is unsafe.
      return SDValue();

    // Are the new bits contiguous when combined with the old bits?
    // Check both orderings: candidate bits below ours, or above ours.
    if (BitsProperlyConcatenate(ToMask, NewToMask) &&
        BitsProperlyConcatenate(FromMask, NewFromMask))
      return V;
    if (BitsProperlyConcatenate(NewToMask, ToMask) &&
        BitsProperlyConcatenate(NewFromMask, FromMask))
      return V;

    // We've seen a write to some bits, so track it.
    CombinedToMask |= NewToMask;
    // Keep going...
    V = V.getOperand(0);
  }

  return SDValue();
}

/// PerformBFICombine - Target-specific dag combine xforms for ARMISD::BFI:
/// drop a redundant AND on the inserted value, or merge two BFIs that insert
/// adjacent bits from the same base value.
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
    // the bits being cleared by the AND are not demanded by the BFI.
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    // Recover the width of the inserted field from the (inverted) BFI mask.
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    unsigned Mask = (1u << Width) - 1;
    unsigned Mask2 = N11C->getZExtValue();
    // The AND is redundant if every demanded bit survives it.
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
    // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
    // Keep track of any consecutive bits set that all come from the same base
    // value. We can combine these together into a single BFI.
    SDValue CombineBFI = FindBFIToCombineWith(N);
    if (CombineBFI == SDValue())
      return SDValue();

    // We've found a BFI.
    APInt ToMask1, FromMask1;
    SDValue From1 = ParseBFI(N, ToMask1, FromMask1);

    APInt ToMask2, FromMask2;
    SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
    assert(From1 == From2);
    (void)From2;

    // First, unlink CombineBFI.
    DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
    // Then create a new BFI, combining the two together.
    APInt NewFromMask = FromMask1 | FromMask2;
    APInt NewToMask = ToMask1 | ToMask2;

    EVT VT = N->getValueType(0);
    SDLoc dl(N);

    // If the merged source field does not start at bit 0, shift it down so
    // the low bits of the new "from" value line up with the field.
    if (NewFromMask[0] == 0)
      From1 = DCI.DAG.getNode(
          ISD::SRL, dl, VT, From1,
          DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
    return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
                           DCI.DAG.getConstant(~NewToMask, dl, VT));
  }
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
9422 static SDValue PerformVMOVRRDCombine(SDNode *N, 9423 TargetLowering::DAGCombinerInfo &DCI, 9424 const ARMSubtarget *Subtarget) { 9425 // vmovrrd(vmovdrr x, y) -> x,y 9426 SDValue InDouble = N->getOperand(0); 9427 if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP()) 9428 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 9429 9430 // vmovrrd(load f64) -> (load i32), (load i32) 9431 SDNode *InNode = InDouble.getNode(); 9432 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 9433 InNode->getValueType(0) == MVT::f64 && 9434 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 9435 !cast<LoadSDNode>(InNode)->isVolatile()) { 9436 // TODO: Should this be done for non-FrameIndex operands? 9437 LoadSDNode *LD = cast<LoadSDNode>(InNode); 9438 9439 SelectionDAG &DAG = DCI.DAG; 9440 SDLoc DL(LD); 9441 SDValue BasePtr = LD->getBasePtr(); 9442 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 9443 LD->getPointerInfo(), LD->isVolatile(), 9444 LD->isNonTemporal(), LD->isInvariant(), 9445 LD->getAlignment()); 9446 9447 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 9448 DAG.getConstant(4, DL, MVT::i32)); 9449 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 9450 LD->getPointerInfo(), LD->isVolatile(), 9451 LD->isNonTemporal(), LD->isInvariant(), 9452 std::min(4U, LD->getAlignment() / 2)); 9453 9454 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 9455 if (DCI.DAG.getDataLayout().isBigEndian()) 9456 std::swap (NewLD1, NewLD2); 9457 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 9458 return Result; 9459 } 9460 9461 return SDValue(); 9462 } 9463 9464 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for 9465 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 
9466 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 9467 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 9468 SDValue Op0 = N->getOperand(0); 9469 SDValue Op1 = N->getOperand(1); 9470 if (Op0.getOpcode() == ISD::BITCAST) 9471 Op0 = Op0.getOperand(0); 9472 if (Op1.getOpcode() == ISD::BITCAST) 9473 Op1 = Op1.getOperand(0); 9474 if (Op0.getOpcode() == ARMISD::VMOVRRD && 9475 Op0.getNode() == Op1.getNode() && 9476 Op0.getResNo() == 0 && Op1.getResNo() == 1) 9477 return DAG.getNode(ISD::BITCAST, SDLoc(N), 9478 N->getValueType(0), Op0.getOperand(0)); 9479 return SDValue(); 9480 } 9481 9482 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 9483 /// are normal, non-volatile loads. If so, it is profitable to bitcast an 9484 /// i64 vector to have f64 elements, since the value can then be loaded 9485 /// directly into a VFP register. 9486 static bool hasNormalLoadOperand(SDNode *N) { 9487 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 9488 for (unsigned i = 0; i < NumElts; ++i) { 9489 SDNode *Elt = N->getOperand(i).getNode(); 9490 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 9491 return true; 9492 } 9493 return false; 9494 } 9495 9496 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 9497 /// ISD::BUILD_VECTOR. 9498 static SDValue PerformBUILD_VECTORCombine(SDNode *N, 9499 TargetLowering::DAGCombinerInfo &DCI, 9500 const ARMSubtarget *Subtarget) { 9501 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 9502 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 9503 // into a pair of GPRs, which is fine when the value is used as a scalar, 9504 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 
9505 SelectionDAG &DAG = DCI.DAG; 9506 if (N->getNumOperands() == 2) 9507 if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) 9508 return RV; 9509 9510 // Load i64 elements as f64 values so that type legalization does not split 9511 // them up into i32 values. 9512 EVT VT = N->getValueType(0); 9513 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 9514 return SDValue(); 9515 SDLoc dl(N); 9516 SmallVector<SDValue, 8> Ops; 9517 unsigned NumElts = VT.getVectorNumElements(); 9518 for (unsigned i = 0; i < NumElts; ++i) { 9519 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 9520 Ops.push_back(V); 9521 // Make the DAGCombiner fold the bitcast. 9522 DCI.AddToWorklist(V.getNode()); 9523 } 9524 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 9525 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); 9526 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 9527 } 9528 9529 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 9530 static SDValue 9531 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 9532 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. 9533 // At that time, we may have inserted bitcasts from integer to float. 9534 // If these bitcasts have survived DAGCombine, change the lowering of this 9535 // BUILD_VECTOR in something more vector friendly, i.e., that does not 9536 // force to use floating point types. 9537 9538 // Make sure we can change the type of the vector. 9539 // This is possible iff: 9540 // 1. The vector is only used in a bitcast to a integer type. I.e., 9541 // 1.1. Vector is used only once. 9542 // 1.2. Use is a bit convert to an integer type. 9543 // 2. The size of its operands are 32-bits (64-bits are not legal). 9544 EVT VT = N->getValueType(0); 9545 EVT EltVT = VT.getVectorElementType(); 9546 9547 // Check 1.1. and 2. 
9548 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) 9549 return SDValue(); 9550 9551 // By construction, the input type must be float. 9552 assert(EltVT == MVT::f32 && "Unexpected type!"); 9553 9554 // Check 1.2. 9555 SDNode *Use = *N->use_begin(); 9556 if (Use->getOpcode() != ISD::BITCAST || 9557 Use->getValueType(0).isFloatingPoint()) 9558 return SDValue(); 9559 9560 // Check profitability. 9561 // Model is, if more than half of the relevant operands are bitcast from 9562 // i32, turn the build_vector into a sequence of insert_vector_elt. 9563 // Relevant operands are everything that is not statically 9564 // (i.e., at compile time) bitcasted. 9565 unsigned NumOfBitCastedElts = 0; 9566 unsigned NumElts = VT.getVectorNumElements(); 9567 unsigned NumOfRelevantElts = NumElts; 9568 for (unsigned Idx = 0; Idx < NumElts; ++Idx) { 9569 SDValue Elt = N->getOperand(Idx); 9570 if (Elt->getOpcode() == ISD::BITCAST) { 9571 // Assume only bit cast to i32 will go away. 9572 if (Elt->getOperand(0).getValueType() == MVT::i32) 9573 ++NumOfBitCastedElts; 9574 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) 9575 // Constants are statically casted, thus do not count them as 9576 // relevant operands. 9577 --NumOfRelevantElts; 9578 } 9579 9580 // Check if more than half of the elements require a non-free bitcast. 9581 if (NumOfBitCastedElts <= NumOfRelevantElts / 2) 9582 return SDValue(); 9583 9584 SelectionDAG &DAG = DCI.DAG; 9585 // Create the new vector type. 9586 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 9587 // Check if the type is legal. 9588 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9589 if (!TLI.isTypeLegal(VecVT)) 9590 return SDValue(); 9591 9592 // Combine: 9593 // ARMISD::BUILD_VECTOR E1, E2, ..., EN. 9594 // => BITCAST INSERT_VECTOR_ELT 9595 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), 9596 // (BITCAST EN), N. 
9597 SDValue Vec = DAG.getUNDEF(VecVT); 9598 SDLoc dl(N); 9599 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { 9600 SDValue V = N->getOperand(Idx); 9601 if (V.isUndef()) 9602 continue; 9603 if (V.getOpcode() == ISD::BITCAST && 9604 V->getOperand(0).getValueType() == MVT::i32) 9605 // Fold obvious case. 9606 V = V.getOperand(0); 9607 else { 9608 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); 9609 // Make the DAGCombiner fold the bitcasts. 9610 DCI.AddToWorklist(V.getNode()); 9611 } 9612 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); 9613 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); 9614 } 9615 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); 9616 // Make the DAGCombiner fold the bitcasts. 9617 DCI.AddToWorklist(Vec.getNode()); 9618 return Vec; 9619 } 9620 9621 /// PerformInsertEltCombine - Target-specific dag combine xforms for 9622 /// ISD::INSERT_VECTOR_ELT. 9623 static SDValue PerformInsertEltCombine(SDNode *N, 9624 TargetLowering::DAGCombinerInfo &DCI) { 9625 // Bitcast an i64 load inserted into a vector to f64. 9626 // Otherwise, the i64 value will be legalized to a pair of i32 values. 9627 EVT VT = N->getValueType(0); 9628 SDNode *Elt = N->getOperand(1).getNode(); 9629 if (VT.getVectorElementType() != MVT::i64 || 9630 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 9631 return SDValue(); 9632 9633 SelectionDAG &DAG = DCI.DAG; 9634 SDLoc dl(N); 9635 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 9636 VT.getVectorNumElements()); 9637 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 9638 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 9639 // Make the DAGCombiner fold the bitcasts. 
9640 DCI.AddToWorklist(Vec.getNode()); 9641 DCI.AddToWorklist(V.getNode()); 9642 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 9643 Vec, V, N->getOperand(2)); 9644 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 9645 } 9646 9647 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 9648 /// ISD::VECTOR_SHUFFLE. 9649 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 9650 // The LLVM shufflevector instruction does not require the shuffle mask 9651 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 9652 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 9653 // operands do not match the mask length, they are extended by concatenating 9654 // them with undef vectors. That is probably the right thing for other 9655 // targets, but for NEON it is better to concatenate two double-register 9656 // size vector operands into a single quad-register size vector. Do that 9657 // transformation here: 9658 // shuffle(concat(v1, undef), concat(v2, undef)) -> 9659 // shuffle(concat(v1, v2), undef) 9660 SDValue Op0 = N->getOperand(0); 9661 SDValue Op1 = N->getOperand(1); 9662 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 9663 Op1.getOpcode() != ISD::CONCAT_VECTORS || 9664 Op0.getNumOperands() != 2 || 9665 Op1.getNumOperands() != 2) 9666 return SDValue(); 9667 SDValue Concat0Op1 = Op0.getOperand(1); 9668 SDValue Concat1Op1 = Op1.getOperand(1); 9669 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) 9670 return SDValue(); 9671 // Skip the transformation if any of the types are illegal. 
9672 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9673 EVT VT = N->getValueType(0); 9674 if (!TLI.isTypeLegal(VT) || 9675 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 9676 !TLI.isTypeLegal(Concat1Op1.getValueType())) 9677 return SDValue(); 9678 9679 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, 9680 Op0.getOperand(0), Op1.getOperand(0)); 9681 // Translate the shuffle mask. 9682 SmallVector<int, 16> NewMask; 9683 unsigned NumElts = VT.getVectorNumElements(); 9684 unsigned HalfElts = NumElts/2; 9685 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 9686 for (unsigned n = 0; n < NumElts; ++n) { 9687 int MaskElt = SVN->getMaskElt(n); 9688 int NewElt = -1; 9689 if (MaskElt < (int)HalfElts) 9690 NewElt = MaskElt; 9691 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 9692 NewElt = HalfElts + MaskElt - NumElts; 9693 NewMask.push_back(NewElt); 9694 } 9695 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, 9696 DAG.getUNDEF(VT), NewMask.data()); 9697 } 9698 9699 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, 9700 /// NEON load/store intrinsics, and generic vector load/stores, to merge 9701 /// base address updates. 9702 /// For generic load/stores, the memory type is assumed to be a vector. 9703 /// The caller is assumed to have checked legality. 9704 static SDValue CombineBaseUpdate(SDNode *N, 9705 TargetLowering::DAGCombinerInfo &DCI) { 9706 SelectionDAG &DAG = DCI.DAG; 9707 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 9708 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 9709 const bool isStore = N->getOpcode() == ISD::STORE; 9710 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); 9711 SDValue Addr = N->getOperand(AddrOpIdx); 9712 MemSDNode *MemN = cast<MemSDNode>(N); 9713 SDLoc dl(N); 9714 9715 // Search for a use of the address operand that is an increment. 
9716 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 9717 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 9718 SDNode *User = *UI; 9719 if (User->getOpcode() != ISD::ADD || 9720 UI.getUse().getResNo() != Addr.getResNo()) 9721 continue; 9722 9723 // Check that the add is independent of the load/store. Otherwise, folding 9724 // it would create a cycle. 9725 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 9726 continue; 9727 9728 // Find the new opcode for the updating load/store. 9729 bool isLoadOp = true; 9730 bool isLaneOp = false; 9731 unsigned NewOpc = 0; 9732 unsigned NumVecs = 0; 9733 if (isIntrinsic) { 9734 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9735 switch (IntNo) { 9736 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 9737 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 9738 NumVecs = 1; break; 9739 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 9740 NumVecs = 2; break; 9741 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 9742 NumVecs = 3; break; 9743 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 9744 NumVecs = 4; break; 9745 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 9746 NumVecs = 2; isLaneOp = true; break; 9747 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 9748 NumVecs = 3; isLaneOp = true; break; 9749 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 9750 NumVecs = 4; isLaneOp = true; break; 9751 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 9752 NumVecs = 1; isLoadOp = false; break; 9753 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 9754 NumVecs = 2; isLoadOp = false; break; 9755 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 9756 NumVecs = 3; isLoadOp = false; break; 9757 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 9758 NumVecs = 4; isLoadOp = false; break; 9759 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 9760 
NumVecs = 2; isLoadOp = false; isLaneOp = true; break; 9761 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 9762 NumVecs = 3; isLoadOp = false; isLaneOp = true; break; 9763 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 9764 NumVecs = 4; isLoadOp = false; isLaneOp = true; break; 9765 } 9766 } else { 9767 isLaneOp = true; 9768 switch (N->getOpcode()) { 9769 default: llvm_unreachable("unexpected opcode for Neon base update"); 9770 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 9771 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 9772 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 9773 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; 9774 NumVecs = 1; isLaneOp = false; break; 9775 case ISD::STORE: NewOpc = ARMISD::VST1_UPD; 9776 NumVecs = 1; isLaneOp = false; isLoadOp = false; break; 9777 } 9778 } 9779 9780 // Find the size of memory referenced by the load/store. 9781 EVT VecTy; 9782 if (isLoadOp) { 9783 VecTy = N->getValueType(0); 9784 } else if (isIntrinsic) { 9785 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 9786 } else { 9787 assert(isStore && "Node has to be a load, a store, or an intrinsic!"); 9788 VecTy = N->getOperand(1).getValueType(); 9789 } 9790 9791 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 9792 if (isLaneOp) 9793 NumBytes /= VecTy.getVectorNumElements(); 9794 9795 // If the increment is a constant, it must match the memory ref size. 9796 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 9797 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 9798 uint64_t IncVal = CInc->getZExtValue(); 9799 if (IncVal != NumBytes) 9800 continue; 9801 } else if (NumBytes >= 3 * 16) { 9802 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 9803 // separate instructions that make it harder to use a non-constant update. 
9804 continue; 9805 } 9806 9807 // OK, we found an ADD we can fold into the base update. 9808 // Now, create a _UPD node, taking care of not breaking alignment. 9809 9810 EVT AlignedVecTy = VecTy; 9811 unsigned Alignment = MemN->getAlignment(); 9812 9813 // If this is a less-than-standard-aligned load/store, change the type to 9814 // match the standard alignment. 9815 // The alignment is overlooked when selecting _UPD variants; and it's 9816 // easier to introduce bitcasts here than fix that. 9817 // There are 3 ways to get to this base-update combine: 9818 // - intrinsics: they are assumed to be properly aligned (to the standard 9819 // alignment of the memory type), so we don't need to do anything. 9820 // - ARMISD::VLDx nodes: they are only generated from the aforementioned 9821 // intrinsics, so, likewise, there's nothing to do. 9822 // - generic load/store instructions: the alignment is specified as an 9823 // explicit operand, rather than implicitly as the standard alignment 9824 // of the memory type (like the intrisics). We need to change the 9825 // memory type to match the explicit alignment. That way, we don't 9826 // generate non-standard-aligned ARMISD::VLDx nodes. 9827 if (isa<LSBaseSDNode>(N)) { 9828 if (Alignment == 0) 9829 Alignment = 1; 9830 if (Alignment < VecTy.getScalarSizeInBits() / 8) { 9831 MVT EltTy = MVT::getIntegerVT(Alignment * 8); 9832 assert(NumVecs == 1 && "Unexpected multi-element generic load/store."); 9833 assert(!isLaneOp && "Unexpected generic load/store lane."); 9834 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8); 9835 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts); 9836 } 9837 // Don't set an explicit alignment on regular load/stores that we want 9838 // to transform to VLD/VST 1_UPD nodes. 9839 // This matches the behavior of regular load/stores, which only get an 9840 // explicit alignment if the MMO alignment is larger than the standard 9841 // alignment of the memory type. 
9842 // Intrinsics, however, always get an explicit alignment, set to the 9843 // alignment of the MMO. 9844 Alignment = 1; 9845 } 9846 9847 // Create the new updating load/store node. 9848 // First, create an SDVTList for the new updating node's results. 9849 EVT Tys[6]; 9850 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); 9851 unsigned n; 9852 for (n = 0; n < NumResultVecs; ++n) 9853 Tys[n] = AlignedVecTy; 9854 Tys[n++] = MVT::i32; 9855 Tys[n] = MVT::Other; 9856 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); 9857 9858 // Then, gather the new node's operands. 9859 SmallVector<SDValue, 8> Ops; 9860 Ops.push_back(N->getOperand(0)); // incoming chain 9861 Ops.push_back(N->getOperand(AddrOpIdx)); 9862 Ops.push_back(Inc); 9863 9864 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { 9865 // Try to match the intrinsic's signature 9866 Ops.push_back(StN->getValue()); 9867 } else { 9868 // Loads (and of course intrinsics) match the intrinsics' signature, 9869 // so just add all but the alignment operand. 9870 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) 9871 Ops.push_back(N->getOperand(i)); 9872 } 9873 9874 // For all node types, the alignment operand is always the last one. 9875 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); 9876 9877 // If this is a non-standard-aligned STORE, the penultimate operand is the 9878 // stored value. Bitcast it to the aligned type. 9879 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { 9880 SDValue &StVal = Ops[Ops.size()-2]; 9881 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); 9882 } 9883 9884 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, 9885 Ops, AlignedVecTy, 9886 MemN->getMemOperand()); 9887 9888 // Update the uses. 
9889 SmallVector<SDValue, 5> NewResults; 9890 for (unsigned i = 0; i < NumResultVecs; ++i) 9891 NewResults.push_back(SDValue(UpdN.getNode(), i)); 9892 9893 // If this is an non-standard-aligned LOAD, the first result is the loaded 9894 // value. Bitcast it to the expected result type. 9895 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { 9896 SDValue &LdVal = NewResults[0]; 9897 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); 9898 } 9899 9900 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 9901 DCI.CombineTo(N, NewResults); 9902 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 9903 9904 break; 9905 } 9906 return SDValue(); 9907 } 9908 9909 static SDValue PerformVLDCombine(SDNode *N, 9910 TargetLowering::DAGCombinerInfo &DCI) { 9911 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 9912 return SDValue(); 9913 9914 return CombineBaseUpdate(N, DCI); 9915 } 9916 9917 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 9918 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 9919 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 9920 /// return true. 9921 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 9922 SelectionDAG &DAG = DCI.DAG; 9923 EVT VT = N->getValueType(0); 9924 // vldN-dup instructions only support 64-bit vectors for N > 1. 9925 if (!VT.is64BitVector()) 9926 return false; 9927 9928 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 
  // The source must be one of the vld2lane/vld3lane/vld4lane intrinsics
  // (INTRINSIC_W_CHAIN nodes).
  SDNode *VLD = N->getOperand(0).getNode();
  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
    return false;
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_neon_vld2lane) {
    NumVecs = 2;
    NewOpc = ARMISD::VLD2DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
    NumVecs = 3;
    NewOpc = ARMISD::VLD3DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
    NumVecs = 4;
    NewOpc = ARMISD::VLD4DUP;
  } else {
    return false;
  }

  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
  // numbers match the load.
  unsigned VLDLaneNo =
    cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    // Ignore uses of the chain result.
    if (UI.getUse().getResNo() == NumVecs)
      continue;
    SDNode *User = *UI;
    if (User->getOpcode() != ARMISD::VDUPLANE ||
        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
      return false;
  }

  // Create the vldN-dup node.
  // Result types: NumVecs vectors of type VT, plus the chain.
  EVT Tys[5];
  unsigned n;
  for (n = 0; n < NumVecs; ++n)
    Tys[n] = VT;
  Tys[n] = MVT::Other;
  SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
  // Operands: the incoming chain and the address; the per-vector and
  // lane-number operands of the original intrinsic are dropped.
  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
                                           Ops, VLDMemInt->getMemoryVT(),
                                           VLDMemInt->getMemOperand());

  // Update the uses.  Each VDUPLANE user is replaced directly with the
  // corresponding vldN-dup result (same result number).
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    unsigned ResNo = UI.getUse().getResNo();
    // Ignore uses of the chain result.
    if (ResNo == NumVecs)
      continue;
    SDNode *User = *UI;
    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
  }

  // Now the vldN-lane intrinsic is dead except for its chain result.
  // Update uses of the chain.
  std::vector<SDValue> VLDDupResults;
  for (unsigned n = 0; n < NumVecs; ++n)
    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
  DCI.CombineTo(VLD, VLDDupResults);

  return true;
}

/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op = N->getOperand(0);

  // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
  // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
  if (CombineVLDDUP(N, DCI))
    return SDValue(N, 0);

  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
  // redundant.  Ignore bit_converts for now; element sizes are checked below.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
    return SDValue();

  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
  unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
  // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  // decodeNEONModImm returning 0 indicates the all-zero immediate; treat its
  // effective element size as 8 bits so the check below is permissive.
  if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
    EltSize = 8;
  EVT VT = N->getValueType(0);
  if (EltSize > VT.getVectorElementType().getSizeInBits())
    return SDValue();

  // The VDUPLANE is redundant: just bitcast the splat to the result type.
  return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}

/// PerformLOADCombine - Target-specific dag combine xforms for ISD::LOAD:
/// try to fold a post-increment into a legal vector load (VLD1_UPD).
static SDValue PerformLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  // If this is a legal vector load, try to combine it into a VLD1_UPD.
  if (ISD::isNormalLoad(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->isVolatile())
    return SDValue();

  // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
  // pack all of the elements in one place.  Next, store to memory in fewer
  // chunks.
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();
  if (St->isTruncatingStore() && VT.isVector()) {
    SelectionDAG &DAG = DCI.DAG;
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    EVT StVT = St->getMemoryVT();
    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
    unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();

    // From, To sizes and ElemCount must be pow of two
    if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();

    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store size.
    if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();

    unsigned SizeRatio = FromEltSz / ToEltSz;
    assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
                                     NumElems*SizeRatio);
    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDLoc DL(St);
    SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    // Pick the narrowed element out of each original element; on big-endian
    // targets the truncated value lives in the last sub-element.
    for (unsigned i = 0; i < NumElems; ++i)
      ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
                          ? (i + 1) * SizeRatio - 1
                          : i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT)) return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
                                         DAG.getUNDEF(WideVec.getValueType()),
                                         ShuffleVec.data());
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to mem.

    // Find the largest store unit
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
        StoreType = Tp;
    }
    // Didn't find a legal store type.
    if (!TLI.isTypeLegal(StoreType))
      return SDValue();

    // Bitcast the original vector into a vector of store-size units
    EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
            StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
                                        TLI.getPointerTy(DAG.getDataLayout()));
    SDValue BasePtr = St->getBasePtr();

    // Perform one or more big stores into memory.
    unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
    for (unsigned I = 0; I < E; I++) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(I, DL));
      SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
                                St->getPointerInfo(), St->isVolatile(),
                                St->isNonTemporal(), St->getAlignment());
      BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
                            Increment);
      Chains.push_back(Ch);
    }
    // Tie the individual store chains back together.
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (!ISD::isNormalStore(St))
    return SDValue();

  // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
  // ARM stores of arguments in the same cache line.
  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
      StVal.getNode()->hasOneUse()) {
    SelectionDAG &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
    SDValue BasePtr = St->getBasePtr();
    // Store the two GPR halves separately; endianness decides which VMOVDRR
    // operand goes at the lower address.
    SDValue NewST1 = DAG.getStore(St->getChain(), DL,
                                  StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
                                  BasePtr, St->getPointerInfo(), St->isVolatile(),
                                  St->isNonTemporal(), St->getAlignment());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    return DAG.getStore(NewST1.getValue(0), DL,
                        StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
                        OffsetPtr, St->getPointerInfo(), St->isVolatile(),
                        St->isNonTemporal(),
                        std::min(4U, St->getAlignment() / 2));
  }

  if (StVal.getValueType() == MVT::i64 &&
      StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

    // Bitcast an i64 store extracted from a vector to f64.
    // Otherwise, the i64 value will be legalized to a pair of i32 values.
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(StVal);
    SDValue IntVec = StVal.getOperand(0);
    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                   IntVec.getValueType().getVectorNumElements());
    SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
    SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                 Vec, StVal.getOperand(1));
    dl = SDLoc(N);
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
    // Make the DAGCombiner fold the bitcasts.
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(ExtElt.getNode());
    DCI.AddToWorklist(V.getNode());
    return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
                        St->getPointerInfo(), St->isVolatile(),
                        St->isNonTemporal(), St->getAlignment(),
                        St->getAAInfo());
  }

  // If this is a legal vector store, try to combine it into a VST1_UPD.
  if (ISD::isNormalStore(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
/// can replace combinations of VMUL and VCVT (floating-point to integer)
/// when the VMUL has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vmul.f32        d16, d17, d16
///  vcvt.s32.f32    d16, d16
/// becomes:
///  vcvt.s32.f32    d16, d16, #3
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      Op.getOpcode() != ISD::FMUL)
    return SDValue();

  SDValue ConstVec = Op->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
    // These instructions only exist converting from f32 to i32. We can handle
    // smaller integers by generating an extra truncate, but larger ones would
    // be lossy. We also can't handle more then 4 lanes, since these intructions
    // only support v2i32/v4i32 types.
    return SDValue();
  }

  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  // The multiplier must be a splat of 2^C with 1 <= C <= 32 (the valid #imm
  // range of the fixed-point VCVT instruction).
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
    Intrinsic::arm_neon_vcvtfp2fxu;
  SDValue FixConv = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
      DAG.getConstant(C, dl, MVT::i32));

  // Narrow back down if the original result type was smaller than i32.
  if (IntBits < FloatBits)
    FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);

  return FixConv;
}

/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
/// can replace combinations of VCVT (integer to floating-point) and VDIV
/// when the VDIV has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vcvt.f32.s32    d16, d16
///  vdiv.f32        d16, d17, d16
/// becomes:
///  vcvt.f32.s32    d16, d16, #3
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  unsigned OpOpcode = Op.getNode()->getOpcode();
  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
    return SDValue();

  SDValue ConstVec = N->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
    // These instructions only exist converting from i32 to f32. We can handle
    // smaller integers by generating an extra extend, but larger ones would
    // be lossy. We also can't handle more then 4 lanes, since these intructions
    // only support v2i32/v4i32 types.
    return SDValue();
  }

  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  // The divisor must be a splat of 2^C with 1 <= C <= 32 (the valid #imm
  // range of the fixed-point VCVT instruction).
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
  SDValue ConvInput = Op.getOperand(0);
  // Widen sub-i32 integer inputs first; the fixed-point VCVT only takes
  // v2i32/v4i32 sources.
  if (IntBits < FloatBits)
    ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                            dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
                            ConvInput);

  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
    Intrinsic::arm_neon_vcvtfxu2fp;
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
                     Op.getValueType(),
                     DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
                     ConvInput, DAG.getConstant(C, dl, MVT::i32));
}

/// Getvshiftimm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                      HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
  if (! getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.
/// For a shift opcode, the value
/// is positive, but for an intrinsic the value count must be negative. The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
  if (! getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (!isIntrinsic)
    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
  // Intrinsics encode right shifts as negative counts; normalize Cnt to the
  // positive shift amount on success.
  if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
    Cnt = -Cnt;
    return true;
  }
  return false;
}

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    // First switch: validate the shift-count operand (and compute Cnt).
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    // Second switch: pick the target node opcode for the validated intrinsic.
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
    // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
    // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
    SDValue N1 = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      SDValue N0 = N->getOperand(0);
      if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
          DAG.MaskedValueIsZero(N0.getOperand(0),
                                APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
    }
  }

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
      SDLoc dl(N);
      return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                            ARMISD::VSHRs : ARMISD::VSHRu);
      SDLoc dl(N);
      return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
  }
  return SDValue();
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8-
  // and 16-bit vector elements.  NEON supports these directly.  They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType()) &&
        isa<ConstantSDNode>(Lane)) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        // ANY_EXTEND can use either form; the zero-extending lane move is
        // used here.
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
    }
  }

  return SDValue();
}

/// Helper for PerformCMOVToBFICombine: like SelectionDAG::computeKnownBits,
/// but additionally understands the ARM-specific BFI and CMOV nodes so known
/// bits can be tracked through them.
static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero,
                             APInt &KnownOne) {
  if (Op.getOpcode() == ARMISD::BFI) {
    // Conservatively, we can recurse down the first operand
    // and just mask out all affected bits.
    computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);

    // The operand to BFI is already a mask suitable for removing the bits it
    // sets.
    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
    const APInt &Mask = CI->getAPIntValue();
    KnownZero &= Mask;
    KnownOne &= Mask;
    return;
  }
  if (Op.getOpcode() == ARMISD::CMOV) {
    // Only bits known in BOTH arms of the CMOV are known in the result.
    APInt KZ2(KnownZero.getBitWidth(), 0);
    APInt KO2(KnownOne.getBitWidth(), 0);
    computeKnownBits(DAG, Op.getOperand(1), KnownZero, KnownOne);
    computeKnownBits(DAG, Op.getOperand(2), KZ2, KO2);

    KnownZero &= KZ2;
    KnownOne &= KO2;
    return;
  }
  return DAG.computeKnownBits(Op, KnownZero, KnownOne);
}

SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
  // If we have a CMOV, OR and AND combination such as:
  //   if (x & CN)
  //     y |= CM;
  //
  // And:
  //   * CN is a single bit;
  //   * All bits covered by CM are known zero in y
  //
  // Then we can convert this into a sequence of BFI instructions. This will
  // always be a win if CM is a single bit, will always be no worse than the
  // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
  // three bits (due to the extra IT instruction).

  SDValue Op0 = CMOV->getOperand(0);
  SDValue Op1 = CMOV->getOperand(1);
  auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
  auto CC = CCNode->getAPIntValue().getLimitedValue();
  SDValue CmpZ = CMOV->getOperand(4);

  // The compare must be against zero.
  if (!isNullConstant(CmpZ->getOperand(1)))
    return SDValue();

  assert(CmpZ->getOpcode() == ARMISD::CMPZ);
  // The condition being tested must be (x & CN) with CN a power of two.
  SDValue And = CmpZ->getOperand(0);
  if (And->getOpcode() != ISD::AND)
    return SDValue();
  ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
  if (!AndC || !AndC->getAPIntValue().isPowerOf2())
    return SDValue();
  SDValue X = And->getOperand(0);

  if (CC == ARMCC::EQ) {
    // We're performing an "equal to zero" compare. Swap the operands so we
    // canonicalize on a "not equal to zero" compare.
    std::swap(Op0, Op1);
  } else {
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
  }

  // The taken arm must be (y | CM) with CM a constant.
  if (Op1->getOpcode() != ISD::OR)
    return SDValue();

  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
  if (!OrC)
    return SDValue();
  SDValue Y = Op1->getOperand(0);

  // The untaken arm must be y itself (the value left unmodified).
  if (Op0 != Y)
    return SDValue();

  // Now, is it profitable to continue?
  APInt OrCI = OrC->getAPIntValue();
  unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
  if (OrCI.countPopulation() > Heuristic)
    return SDValue();

  // Lastly, can we determine that the bits defined by OrCI
  // are zero in Y?
  APInt KnownZero, KnownOne;
  computeKnownBits(DAG, Y, KnownZero, KnownOne);
  if ((OrCI & KnownZero) != OrCI)
    return SDValue();

  // OK, we can do the combine.
  SDValue V = Y;
  SDLoc dl(X);
  EVT VT = X.getValueType();
  unsigned BitInX = AndC->getAPIntValue().logBase2();

  if (BitInX != 0) {
    // We must shift X first.
    X = DAG.getNode(ISD::SRL, dl, VT, X,
                    DAG.getConstant(BitInX, dl, VT));
  }

  // Emit one BFI per set bit of CM, each inserting bit 0 of the (shifted) X
  // at the corresponding position in V.
  for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
       BitInY < NumActiveBits; ++BitInY) {
    if (OrCI[BitInY] == 0)
      continue;
    APInt Mask(VT.getSizeInBits(), 0);
    Mask.setBit(BitInY);
    V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
                    // Confusingly, the operand is an *inverted* mask.
                    DAG.getConstant(~Mask, dl, VT));
  }

  return V;
}

/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue Chain = N->getOperand(0);
  SDValue BB = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
  // -> (brcond Chain BB CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
      LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
      LHS->getOperand(0)->hasOneUse()) {
    auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
    auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS00C && LHS00C->getZExtValue() == 0) &&
        (LHS01C && LHS01C->getZExtValue() == 1) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(
          ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
          LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
    }
  }

  return SDValue();
}

/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }

  // Simplify
  // mov     r1, r0
  // cmp     r1, x
  // mov     r0, y
  // moveq   r0, x
  // to
  // cmp     r0, x
  // movne   r0, y
  //
  // mov     r1, r0
  // cmp     r1, x
  // mov     r0, x
  // movne   r0, y
  // to
  // cmp     r0, x
  // movne   r0, y
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                      N->getOperand(3), Cmp);
  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
    // Invert the condition and flip the select operands instead.
    SDValue ARMcc;
    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
                      N->getOperand(3), NewCmp);
  }

  // Strip a redundant materialize-and-retest of a flag:
  // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
  // -> (cmov F T CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
    auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS0C && LHS0C->getZExtValue() == 0) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                         LHS->getOperand(2), LHS->getOperand(3),
                         LHS->getOperand(4));
    }
  }

  if (Res.getNode()) {
    APInt KnownZero, KnownOne;
    DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
    // Capture demanded bits information that would be otherwise lost.
    if (KnownZero == 0xfffffffe)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i1));
    else if (KnownZero == 0xffffff00)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i8));
    else if (KnownZero == 0xffff0000)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i16));
  }

  return Res;
}

/// PerformDAGCombine - Dispatch to the per-opcode target combines above.
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget);
  case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget);
  case ISD::SUB: return PerformSUBCombine(N, DCI);
  case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
  case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
  case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget);
  case ISD::AND: return PerformANDCombine(N, DCI, Subtarget);
  case ARMISD::BFI: return PerformBFICombine(N, DCI);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
  case ISD::STORE: return PerformSTORECombine(N, DCI);
  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return PerformVCVTCombine(N, DCI.DAG, Subtarget);
  case ISD::FDIV:
    return PerformVDIVCombine(N, DCI.DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL: return PerformShiftCombine(N, DCI.DAG,
                                           Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
  case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
  case ISD::LOAD: return PerformLOADCombine(N, DCI);
  case ARMISD::VLD2DUP:
  case ARMISD::VLD3DUP:
  case ARMISD::VLD4DUP:
    return PerformVLDCombine(N, DCI);
  case ARMISD::BUILD_VECTOR:
    return PerformARMBUILD_VECTORCombine(N, DCI);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    // For chained intrinsics the intrinsic ID is operand 1.
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return PerformVLDCombine(N, DCI);
    default: break;
    }
    break;
  }
  return SDValue();
}

/// isDesirableToTransformToIntegerOp - f32 loads/stores are better done on
/// the integer side; other combinations are left alone.
bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const {
  return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}

bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                       unsigned,
                                                       unsigned,
                                                       bool *Fast) const {
  // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32: {
    // Unaligned access can use (for example) LRDB, LRDH, LDR
    if (AllowsUnaligned) {
      if (Fast)
        // Unaligned accesses are only considered fast from v7 onwards.
        *Fast = Subtarget->hasV7Ops();
      return true;
    }
    return false;
  }
  case MVT::f64:
  case MVT::v2f64: {
    // For any little-endian targets with neon, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses
    if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
      if (Fast)
        *Fast = true;
      return true;
    }
    return false;
  }
  }
}

// memOpAlign - Return true when both source and destination alignments are
// multiples of AlignCheck; an alignment of 0 means "unknown" and is accepted.
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
          (DstAlign == 0 || DstAlign % AlignCheck == 0));
}

EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  const Function *F = MF.getFunction();

  // See if we can use NEON instructions for this...
  if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
      !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    bool Fast;
    if (Size >= 16 &&
        (memOpAlign(SrcAlign, DstAlign, 16) ||
         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
      return MVT::v2f64;
    } else if (Size >= 8 &&
               (memOpAlign(SrcAlign, DstAlign, 8) ||
                (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
                 Fast))) {
      return MVT::f64;
    }
  }

  // Lowering to i32/i16 if the size permits.
  if (Size >= 4)
    return MVT::i32;
  else if (Size >= 2)
    return MVT::i16;

  // Let the target-independent logic figure it out.
  return MVT::Other;
}

bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Only loads get the implicit zero extension for free.
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
    return true;
  }

  return false;
}

bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT VT = ExtVal.getValueType();

  if (!isTypeLegal(VT))
    return false;

  // Don't create a loadext if we can fold the extension into a wide/long
  // instruction.
  // If there's more than one user instruction, the loadext is desirable no
  // matter what. There can be two uses by the same instruction.
  if (ExtVal->use_empty() ||
      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
    return true;

  SDNode *U = *ExtVal->use_begin();
  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
    return false;

  return true;
}

bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}


// isLegalT1AddressImmediate - Thumb1 load/store offsets are unsigned 5-bit
// immediates scaled by the access size.
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    // Scale == 1;
    break;
  case MVT::i16:
    // Scale == 2;
    Scale = 2;
    break;
  case MVT::i32:
    // Scale == 4;
    Scale = 4;
    break;
  }

  // The offset must be a multiple of the access size...
  if ((V & (Scale - 1)) != 0)
    return false;
  V /= Scale;
  // ...and the scaled value must fit in 5 bits.
  return V == (V & ((1LL << 5) - 1));
}

// isLegalT2AddressImmediate - Thumb2 accepts +imm12 or -imm8 for integer
// accesses and a word-scaled 8-bit immediate for VFP accesses.
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  bool isNeg = false;
  if (V < 0) {
    isNeg = true;
    V = - V;
  }

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // + imm12 or - imm8
    if (isNeg)
      return V == (V & ((1LL << 8) - 1));
    return V == (V & ((1LL << 12) - 1));
  case MVT::f32:
  case MVT::f64:
    // Same as ARM mode. FIXME: NEON?
    if (!Subtarget->hasVFP2())
      return false;
    // VFP offsets must be word-aligned and fit in 8 scaled bits.
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = - V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return V == (V & ((1LL << 12) - 1));
  case MVT::i16:
    // +- imm8
    return V == (V & ((1LL << 8) - 1));
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2()) // FIXME: NEON?
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

// isLegalT2ScaledAddressingMode - Check a scaled-register address (r + r<<imm)
// against what Thumb2 load/store instructions can encode.
bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // r + r
    if (((unsigned)AM.HasBaseReg + Scale) <= 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because arm allows folding a scale into many arithmetic
    // operations. This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  EVT VT = getValueType(DL, Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  case 1:
    if (Subtarget->isThumb1Only())
      return false;
    // FALL THROUGH.
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    // ARM mode.
    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r + r
      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations. This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // Thumb2 and ARM modes can use cmn for negative immediates.
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(std::abs(Imm)) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1;
  // Thumb1 doesn't have cmn, and only 8-bit immediates.
  return Imm >= 0 && Imm <= 255;
}

/// isLegalAddImmediate - Return true if the specified immediate is a legal add
/// *or sub* immediate, that is the target has add or sub instructions which can
/// add a register with the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Same encoding for add/sub, just flip the sign.
  int64_t AbsImm = std::abs(Imm);
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(AbsImm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has 8-bit unsigned immediate.
  return AbsImm >= 0 && AbsImm <= 255;
}

// getARMIndexedAddressParts - Decompose Ptr (an ADD or SUB of the base) into
// Base and Offset for an ARM-mode pre/post-indexed access, setting isInc to
// distinguish increment from decrement. Returns false if no legal form.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        // A negative addend only appears on ADD; encode it as a decrement
        // by the positive amount (AM3 offsets are 8-bit).
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        // AM2 offsets are 12-bit; same negative-addend trick as above.
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      // If one operand is a shift, it must be the offset so the shift can be
      // folded into the addressing mode.
      ARM_AM::ShiftOpc ShOpcVal=
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

// getT2IndexedAddressParts - Thumb2 analogue of getARMIndexedAddressParts.
// Thumb2 indexed loads/stores take an 8-bit immediate offset only.
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
11350 bool 11351 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 11352 SDValue &Offset, 11353 ISD::MemIndexedMode &AM, 11354 SelectionDAG &DAG) const { 11355 if (Subtarget->isThumb1Only()) 11356 return false; 11357 11358 EVT VT; 11359 SDValue Ptr; 11360 bool isSEXTLoad = false; 11361 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 11362 Ptr = LD->getBasePtr(); 11363 VT = LD->getMemoryVT(); 11364 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 11365 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 11366 Ptr = ST->getBasePtr(); 11367 VT = ST->getMemoryVT(); 11368 } else 11369 return false; 11370 11371 bool isInc; 11372 bool isLegal = false; 11373 if (Subtarget->isThumb2()) 11374 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 11375 Offset, isInc, DAG); 11376 else 11377 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 11378 Offset, isInc, DAG); 11379 if (!isLegal) 11380 return false; 11381 11382 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 11383 return true; 11384 } 11385 11386 /// getPostIndexedAddressParts - returns true by value, base pointer and 11387 /// offset pointer and addressing mode by reference if this node can be 11388 /// combined with a load / store to form a post-indexed load / store. 
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}

void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = KnownOne.getBitWidth();
  KnownZero = KnownOne = APInt(BitWidth, 0);
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // These nodes' second result is a boolean
    if (Op.getResNo() == 0)
      break;
    // All bits above bit 0 of the boolean result are known zero.
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
    // Only bits known on both selected values survive.
    KnownZero &= KnownZeroRHS;
    KnownOne &= KnownOneRHS;
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::arm_ldaex:
    case Intrinsic::arm_ldrex: {
      // ldrex/ldaex zero-extend from the accessed memory width, so the high
      // bits of the result are known zero.
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      // A 32-bit "rev" is just a byte swap; lower it to the intrinsic.
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}

const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasVFP2())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
      (ConstraintVT.getSizeInBits() == 64 ||
       ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    case 'h': return C_RegisterClass;
    case 'x': return C_RegisterClass;
    case 't': return C_RegisterClass;
    case 'j': return C_Other; // Constant for movw.
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as an 'r' memory constraint.
    case 'Q': return C_Memory;
    }
  } else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    default: break;
    // All 'U+' constraints are addresses.
    case 'U': return C_Memory;
    }
  }
  // Anything unrecognized is handled by the target-independent code.
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      // VFP/NEON register, sized by the operand type.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      // Like 'w', restricted to the first eight registers of each class.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return
std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 11645 11646 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11647 } 11648 11649 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 11650 /// vector. If it is invalid, don't add anything to Ops. 11651 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 11652 std::string &Constraint, 11653 std::vector<SDValue>&Ops, 11654 SelectionDAG &DAG) const { 11655 SDValue Result; 11656 11657 // Currently only support length 1 constraints. 11658 if (Constraint.length() != 1) return; 11659 11660 char ConstraintLetter = Constraint[0]; 11661 switch (ConstraintLetter) { 11662 default: break; 11663 case 'j': 11664 case 'I': case 'J': case 'K': case 'L': 11665 case 'M': case 'N': case 'O': 11666 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 11667 if (!C) 11668 return; 11669 11670 int64_t CVal64 = C->getSExtValue(); 11671 int CVal = (int) CVal64; 11672 // None of these constraints allow values larger than 32 bits. Check 11673 // that the value fits in an int. 11674 if (CVal != CVal64) 11675 return; 11676 11677 switch (ConstraintLetter) { 11678 case 'j': 11679 // Constant suitable for movw, must be between 0 and 11680 // 65535. 11681 if (Subtarget->hasV6T2Ops()) 11682 if (CVal >= 0 && CVal <= 65535) 11683 break; 11684 return; 11685 case 'I': 11686 if (Subtarget->isThumb1Only()) { 11687 // This must be a constant between 0 and 255, for ADD 11688 // immediates. 11689 if (CVal >= 0 && CVal <= 255) 11690 break; 11691 } else if (Subtarget->isThumb2()) { 11692 // A constant that can be used as an immediate value in a 11693 // data-processing instruction. 11694 if (ARM_AM::getT2SOImmVal(CVal) != -1) 11695 break; 11696 } else { 11697 // A constant that can be used as an immediate value in a 11698 // data-processing instruction. 
            if (ARM_AM::getSOImmVal(CVal) != -1)
              break;
          }
          return;

        case 'J':
          if (Subtarget->isThumb1Only()) {
            // This must be a constant between -255 and -1, for negated ADD
            // immediates. This can be used in GCC with an "n" modifier that
            // prints the negated value, for use with SUB instructions. It is
            // not useful otherwise but is implemented for compatibility.
            if (CVal >= -255 && CVal <= -1)
              break;
          } else {
            // This must be a constant between -4095 and 4095. It is not clear
            // what this constraint is intended for. Implemented for
            // compatibility with GCC.
            if (CVal >= -4095 && CVal <= 4095)
              break;
          }
          return;

        case 'K':
          if (Subtarget->isThumb1Only()) {
            // A 32-bit value where only one byte has a nonzero value. Exclude
            // zero to match GCC. This constraint is used by GCC internally for
            // constants that can be loaded with a move/shift combination.
            // It is not useful otherwise but is implemented for compatibility.
            if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
              break;
          } else if (Subtarget->isThumb2()) {
            // A constant whose bitwise inverse can be used as an immediate
            // value in a data-processing instruction. This can be used in GCC
            // with a "B" modifier that prints the inverted value, for use with
            // BIC and MVN instructions. It is not useful otherwise but is
            // implemented for compatibility.
            if (ARM_AM::getT2SOImmVal(~CVal) != -1)
              break;
          } else {
            // A constant whose bitwise inverse can be used as an immediate
            // value in a data-processing instruction. This can be used in GCC
            // with a "B" modifier that prints the inverted value, for use with
            // BIC and MVN instructions. It is not useful otherwise but is
            // implemented for compatibility.
            if (ARM_AM::getSOImmVal(~CVal) != -1)
              break;
          }
          return;

        case 'L':
          if (Subtarget->isThumb1Only()) {
            // This must be a constant between -7 and 7,
            // for 3-operand ADD/SUB immediate instructions.
            // NOTE(review): the upper bound uses '<', so 7 itself is
            // rejected even though the comment (and GCC's Thumb1 'L' range)
            // says -7..7 inclusive — confirm whether this off-by-one is
            // intentional before changing it.
            if (CVal >= -7 && CVal < 7)
              break;
          } else if (Subtarget->isThumb2()) {
            // A constant whose negation can be used as an immediate value in a
            // data-processing instruction. This can be used in GCC with an "n"
            // modifier that prints the negated value, for use with SUB
            // instructions. It is not useful otherwise but is implemented for
            // compatibility.
            if (ARM_AM::getT2SOImmVal(-CVal) != -1)
              break;
          } else {
            // A constant whose negation can be used as an immediate value in a
            // data-processing instruction. This can be used in GCC with an "n"
            // modifier that prints the negated value, for use with SUB
            // instructions. It is not useful otherwise but is implemented for
            // compatibility.
            if (ARM_AM::getSOImmVal(-CVal) != -1)
              break;
          }
          return;

        case 'M':
          if (Subtarget->isThumb1Only()) {
            // This must be a multiple of 4 between 0 and 1020, for
            // ADD sp + immediate.
            if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
              break;
          } else {
            // A power of two or a constant between 0 and 32.  This is used in
            // GCC for the shift amount on shifted register operands, but it is
            // useful in general for any shift amounts.
            if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
              break;
          }
          return;

        case 'N':
          if (Subtarget->isThumb()) {  // FIXME thumb2
            // This must be a constant between 0 and 31, for shift amounts.
            if (CVal >= 0 && CVal <= 31)
              break;
          }
          return;

        case 'O':
          if (Subtarget->isThumb()) {  // FIXME thumb2
            // This must be a multiple of 4 between -508 and 508, for
            // ADD/SUB sp = sp + immediate.
            if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
              break;
          }
          return;
      }
      // The constant passed all range checks for its letter: materialize it
      // as a target constant so the asm printer can emit it.
      Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
      break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

/// Pick the {s,u}divrem runtime libcall matching the node's opcode
/// (signedness comes from SDIVREM/SREM vs UDIVREM/UREM) and the simple
/// value type SVT (i8/i16/i32/i64 only).
static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemLibcall");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  RTLIB::Libcall LC;
  switch (SVT) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64: LC = isSigned ?
RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  }
  return LC;
}

/// Build the argument list for a divrem libcall from the node's operands,
/// marking each argument for sign- or zero-extension to match the libcall's
/// signedness.
static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemArgList");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    EVT ArgVT = N->getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*Context);
    Entry.Node = N->getOperand(i);
    Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  return Args;
}

/// Lower ISD::SDIVREM / ISD::UDIVREM into a call to the matching AEABI
/// divmod helper, which returns both quotient and remainder in registers
/// (hence the {Ty, Ty} struct return type and setInRegister()).
SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
          Subtarget->isTargetGNUAEABI()) &&
         "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
                                       VT.getSimpleVT().SimpleTy);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
                                                    DAG.getContext());

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  // {div, rem} aggregate return type.  The trailing nullptr terminates the
  // (old-style) variadic StructType::get.
  Type *RetTy = (Type*)StructType::get(Ty, Ty, nullptr);

  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args), 0)
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

// Lowers REM using divmod helpers
// see RTABI section 4.2/4.3
SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
  // Build return types (div and rem)
  std::vector<Type*> RetTyParams;
  Type *RetTyElement;

  switch (N->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
  case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
  case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
  case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
  }

  RetTyParams.push_back(RetTyElement);
  RetTyParams.push_back(RetTyElement);
  ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
  Type *RetTy = StructType::get(*DAG.getContext(), ret);

  RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
                                       SimpleTy);
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext());
  bool isSigned = N->getOpcode() == ISD::SREM;
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  // Lower call
  CallLoweringInfo CLI(DAG);
  CLI.setChain(InChain)
     .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args), 0)
     .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // Return second (rem) result operand (first contains div)
  SDNode *ResNode = CallResult.first.getNode();
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
  return ResNode->getOperand(1);
}

/// Custom-lower dynamic stack allocation on Windows, which requires probing
/// the stack via the __chkstk helper rather than simply adjusting SP.
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
  SDLoc DL(Op);

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);

  // The allocation size is passed to the chkstk helper in words (bytes / 4),
  // in R4; the glue keeps the copy and the WIN__CHKSTK node adjacent.
  SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
                              DAG.getConstant(2, DL, MVT::i32));

  SDValue Flag;
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);

  // The result of the allocation is the adjusted stack pointer.
  SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
  Chain = NewSP.getValue(1);

  SDValue Ops[2] = { NewSP, Chain };
  return DAG.getMergeValues(Ops, DL);
}

/// On FP-only-SP subtargets f64 is not natively supported, so extensions to
/// f64 are lowered to the appropriate soft-float libcall.
SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
         "Unexpected type for custom-lowering FP_EXTEND");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
                     SDLoc(Op)).first;
}

/// Likewise, rounding from f64 on FP-only-SP subtargets goes through a
/// soft-float truncation libcall.
SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getOperand(0).getValueType() == MVT::f64 &&
         Subtarget->isFPOnlySP() &&
         "Unexpected type for custom-lowering FP_ROUND");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
                     SDLoc(Op)).first;
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

/// Returns true if \p v is a mask with a single contiguous run of 0 bits
/// (the "inverted bit-field": 1s on either or both outsides).  All-ones is
/// rejected explicitly since it has no cleared field at all.
bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;

  // there can be 1's on either or both "outsides", all the "inside"
  // bits must be 0's
  return isShiftedMask_32(~v);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // VMOV with an encoded immediate requires VFPv3.
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64 && !Subtarget->isFPOnlySP())
    return ARM_AM::getFP64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // The alignment is always the last argument of the vldN intrinsics.
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    // Arguments are [ptr, vec..., align]; sum the sizes of the vector args.
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    // Exclusive loads: the accessed type is the pointee of the first operand.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
    Info.vol = true;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    // Exclusive stores: operand 0 is the value, operand 1 is the address.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
    Info.vol = true;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_stlexd:
  case Intrinsic::arm_strexd: {
    // 64-bit exclusive store: (lo, hi, addr) — address is operand 2.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = 8;
    Info.vol = true;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_ldaexd:
  case Intrinsic::arm_ldrexd: {
    // 64-bit exclusive load from the address in operand 0.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.vol = true;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  default:
    break;
  }

  return false;
}

/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  // Anything up to 32 bits can be materialized in registers; wider (or
  // zero-width) types cannot.
  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

/// Emit a data memory barrier of the given domain, falling back to the
/// ARMv6 CP15 barrier when the subtarget lacks a DMB instruction.
Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
                                        ARM_MB::MemBOpt Domain) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  // First, if the target has no DMB, see what fallback we can use.
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
      // i.e. "mcr p15, 0, <reg>, c7, c10, 5" — the CP15 data memory barrier.
      Function *MCR = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
      Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0), Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
      return Builder.CreateCall(MCR, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
    Function *DMB = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateCall(DMB, CDomain);
  }
}

// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord, bool IsStore,
                                                 bool IsLoad) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!IsStore)
      return nullptr; // Nothing to do
    /*FALLTHROUGH*/
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    // Swift CPUs get the cheaper store-only barrier for release orderings.
    if (Subtarget->isSwift())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    else
      return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
//
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD)
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}

// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U))
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  return getTargetMachine().getOptLevel() != 0;
}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}

// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

/// Returns true (with Cost set) if a store of an extracted vector element
/// can be folded into a single store instruction for this target.
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store extract of a vector type, this is
  // better to leave at float as we have more freedom in the addressing mode for
  // those.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
  // We can do a store + vector extract on any vector that fits perfectly in a D
  // or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}

bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  // CTZ lowers to RBIT + CLZ, which requires v6T2.
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

/// Emit a load-exclusive of the value at Addr, using ldaex(d) for acquire
/// (or stronger) orderings and ldrex(d) otherwise.
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    // Which half is "low" depends on endianness.
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);

  // The intrinsic returns i32; truncate back down to the in-memory type.
  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  // clrex only exists from v7 onwards.
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}

/// Emit a store-exclusive of Val to Addr, using stlex(d) for release
/// (or stronger) orderings and strex(d) otherwise.  Returns the i32
/// success/failure result of the exclusive store.
Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    // Which half is "low" depends on endianness.
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ?
Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  // The intrinsic expects an i32 value; widen Val before the call.
  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}

/// \brief Lower an interleaved load into a vldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  VectorType *VecTy = Shuffles[0]->getType();
  Type *EltTy = VecTy->getVectorElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();
  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64;

  // Skip if we do not have NEON and skip illegal vector types and vector types
  // with i64/f64 elements (vldN doesn't support i64/f64 elements).
  if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128) || EltIs64Bits)
    return false;

  // A pointer vector can not be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy =
        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());

  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};

  IRBuilder<> Builder(LI);
  SmallVector<Value *, 2> Ops;

  // vldN arguments are (ptr, align).
  Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
  Ops.push_back(Builder.CreateBitCast(LI->getPointerOperand(), Int8Ptr));
  Ops.push_back(Builder.getInt32(LI->getAlignment()));

  Type *Tys[] = { VecTy, Int8Ptr };
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
  CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

  // Replace uses of each shufflevector with the corresponding vector loaded
  // by ldN.
  for (unsigned i = 0; i < Shuffles.size(); i++) {
    ShuffleVectorInst *SV = Shuffles[i];
    unsigned Index = Indices[i];

    Value *SubVec = Builder.CreateExtractValue(VldN, Index);

    // Convert the integer vector to pointer vector if the element is pointer.
    if (EltTy->isPointerTy())
      SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());

    SV->replaceAllUsesWith(SubVec);
  }

  return true;
}

/// \brief Get a mask consisting of sequential integers starting from \p Start.
///
/// I.e. <Start, Start + 1, ..., Start + NumElts - 1>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                   unsigned NumElts) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumElts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  return ConstantVector::get(Mask);
}

/// \brief Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
///      Into:
///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  VectorType *VecTy = SVI->getType();
  assert(VecTy->getVectorNumElements() % Factor == 0 &&
         "Invalid interleaved store");

  unsigned NumSubElts = VecTy->getVectorNumElements() / Factor;
  Type *EltTy = VecTy->getVectorElementType();
  VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts);

  const DataLayout &DL = SI->getModule()->getDataLayout();
  unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);
  bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64;

  // Skip if we do not have NEON and skip illegal vector types and vector types
  // with i64/f64 elements (vstN doesn't support i64/f64 elements).
  if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128) ||
      EltIs64Bits)
    return false;

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    Type *IntVecTy =
        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = VectorType::get(IntTy, NumSubElts);
  }

  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};
  SmallVector<Value *, 6> Ops;

  // vstN arguments are (ptr, subvec..., align).
  Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
  Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), Int8Ptr));

  Type *Tys[] = { Int8Ptr, SubVecTy };
  Function *VstNFunc = Intrinsic::getDeclaration(
      SI->getModule(), StoreInts[Factor - 2], Tys);

  // Split the shufflevector operands into sub vectors for the new vstN call.
  for (unsigned i = 0; i < Factor; i++)
    Ops.push_back(Builder.CreateShuffleVector(
        Op0, Op1, getSequentialMask(Builder, NumSubElts * i, NumSubElts)));

  Ops.push_back(Builder.getInt32(SI->getAlignment()));
  Builder.CreateCall(VstNFunc, Ops);
  return true;
}

// Classification of the base element type of a homogeneous aggregate (HA),
// used by the AAPCS-VFP argument-passing logic below.
enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

// Recursively walk Ty, accumulating the member count into Members and the
// common base element kind into Base; returns false as soon as the type is
// found not to be a homogeneous aggregate.
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members
= 1; 12563 Base = HA_FLOAT; 12564 } else if (Ty->isDoubleTy()) { 12565 if (Base != HA_UNKNOWN && Base != HA_DOUBLE) 12566 return false; 12567 Members = 1; 12568 Base = HA_DOUBLE; 12569 } else if (auto *VT = dyn_cast<VectorType>(Ty)) { 12570 Members = 1; 12571 switch (Base) { 12572 case HA_FLOAT: 12573 case HA_DOUBLE: 12574 return false; 12575 case HA_VECT64: 12576 return VT->getBitWidth() == 64; 12577 case HA_VECT128: 12578 return VT->getBitWidth() == 128; 12579 case HA_UNKNOWN: 12580 switch (VT->getBitWidth()) { 12581 case 64: 12582 Base = HA_VECT64; 12583 return true; 12584 case 128: 12585 Base = HA_VECT128; 12586 return true; 12587 default: 12588 return false; 12589 } 12590 } 12591 } 12592 12593 return (Members > 0 && Members <= 4); 12594 } 12595 12596 /// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of 12597 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when 12598 /// passing according to AAPCS rules. 12599 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( 12600 Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { 12601 if (getEffectiveCallingConv(CallConv, isVarArg) != 12602 CallingConv::ARM_AAPCS_VFP) 12603 return false; 12604 12605 HABaseType Base = HA_UNKNOWN; 12606 uint64_t Members = 0; 12607 bool IsHA = isHomogeneousAggregate(Ty, Base, Members); 12608 DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump()); 12609 12610 bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); 12611 return IsHA || IsIntArray; 12612 } 12613 12614 unsigned ARMTargetLowering::getExceptionPointerRegister( 12615 const Constant *PersonalityFn) const { 12616 // Platforms which do not use SjLj EH may return values in these registers 12617 // via the personality function. 12618 return Subtarget->useSjLjEH() ? 
ARM::NoRegister : ARM::R0; 12619 } 12620 12621 unsigned ARMTargetLowering::getExceptionSelectorRegister( 12622 const Constant *PersonalityFn) const { 12623 // Platforms which do not use SjLj EH may return values in these registers 12624 // via the personality function. 12625 return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1; 12626 } 12627 12628 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 12629 // Update IsSplitCSR in ARMFunctionInfo. 12630 ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); 12631 AFI->setIsSplitCSR(true); 12632 } 12633 12634 void ARMTargetLowering::insertCopiesSplitCSR( 12635 MachineBasicBlock *Entry, 12636 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 12637 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 12638 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 12639 if (!IStart) 12640 return; 12641 12642 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 12643 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 12644 MachineBasicBlock::iterator MBBI = Entry->begin(); 12645 for (const MCPhysReg *I = IStart; *I; ++I) { 12646 const TargetRegisterClass *RC = nullptr; 12647 if (ARM::GPRRegClass.contains(*I)) 12648 RC = &ARM::GPRRegClass; 12649 else if (ARM::DPRRegClass.contains(*I)) 12650 RC = &ARM::DPRRegClass; 12651 else 12652 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 12653 12654 unsigned NewVR = MRI->createVirtualRegister(RC); 12655 // Create copy from CSR to a virtual register. 12656 // FIXME: this currently does not emit CFI pseudo-instructions, it works 12657 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 12658 // nounwind. If we want to generalize this later, we may need to emit 12659 // CFI pseudo-instructions. 
12660 assert(Entry->getParent()->getFunction()->hasFnAttribute( 12661 Attribute::NoUnwind) && 12662 "Function should be nounwind in insertCopiesSplitCSR!"); 12663 Entry->addLiveIn(*I); 12664 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 12665 .addReg(*I); 12666 12667 // Insert the copy-back instructions right before the terminator. 12668 for (auto *Exit : Exits) 12669 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 12670 TII->get(TargetOpcode::COPY), *I) 12671 .addReg(NewVR); 12672 } 12673 } 12674