1 //===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the interfaces that ARM uses to lower LLVM code into a 11 // selection DAG. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "ARMISelLowering.h" 16 #include "ARMBaseInstrInfo.h" 17 #include "ARMBaseRegisterInfo.h" 18 #include "ARMCallingConv.h" 19 #include "ARMConstantPoolValue.h" 20 #include "ARMMachineFunctionInfo.h" 21 #include "ARMPerfectShuffle.h" 22 #include "ARMRegisterInfo.h" 23 #include "ARMSelectionDAGInfo.h" 24 #include "ARMSubtarget.h" 25 #include "MCTargetDesc/ARMAddressingModes.h" 26 #include "MCTargetDesc/ARMBaseInfo.h" 27 #include "Utils/ARMBaseInfo.h" 28 #include "llvm/ADT/APFloat.h" 29 #include "llvm/ADT/APInt.h" 30 #include "llvm/ADT/ArrayRef.h" 31 #include "llvm/ADT/BitVector.h" 32 #include "llvm/ADT/DenseMap.h" 33 #include "llvm/ADT/STLExtras.h" 34 #include "llvm/ADT/SmallPtrSet.h" 35 #include "llvm/ADT/SmallVector.h" 36 #include "llvm/ADT/Statistic.h" 37 #include "llvm/ADT/StringExtras.h" 38 #include "llvm/ADT/StringRef.h" 39 #include "llvm/ADT/StringSwitch.h" 40 #include "llvm/ADT/Triple.h" 41 #include "llvm/ADT/Twine.h" 42 #include "llvm/Analysis/VectorUtils.h" 43 #include "llvm/CodeGen/CallingConvLower.h" 44 #include "llvm/CodeGen/ISDOpcodes.h" 45 #include "llvm/CodeGen/IntrinsicLowering.h" 46 #include "llvm/CodeGen/MachineBasicBlock.h" 47 #include "llvm/CodeGen/MachineConstantPool.h" 48 #include "llvm/CodeGen/MachineFrameInfo.h" 49 #include "llvm/CodeGen/MachineFunction.h" 50 #include "llvm/CodeGen/MachineInstr.h" 51 #include "llvm/CodeGen/MachineInstrBuilder.h" 52 #include "llvm/CodeGen/MachineJumpTableInfo.h" 53 #include 
"llvm/CodeGen/MachineMemOperand.h" 54 #include "llvm/CodeGen/MachineOperand.h" 55 #include "llvm/CodeGen/MachineRegisterInfo.h" 56 #include "llvm/CodeGen/RuntimeLibcalls.h" 57 #include "llvm/CodeGen/SelectionDAG.h" 58 #include "llvm/CodeGen/SelectionDAGNodes.h" 59 #include "llvm/CodeGen/TargetInstrInfo.h" 60 #include "llvm/CodeGen/TargetLowering.h" 61 #include "llvm/CodeGen/TargetOpcodes.h" 62 #include "llvm/CodeGen/TargetRegisterInfo.h" 63 #include "llvm/CodeGen/TargetSubtargetInfo.h" 64 #include "llvm/CodeGen/ValueTypes.h" 65 #include "llvm/IR/Attributes.h" 66 #include "llvm/IR/CallingConv.h" 67 #include "llvm/IR/Constant.h" 68 #include "llvm/IR/Constants.h" 69 #include "llvm/IR/DataLayout.h" 70 #include "llvm/IR/DebugLoc.h" 71 #include "llvm/IR/DerivedTypes.h" 72 #include "llvm/IR/Function.h" 73 #include "llvm/IR/GlobalAlias.h" 74 #include "llvm/IR/GlobalValue.h" 75 #include "llvm/IR/GlobalVariable.h" 76 #include "llvm/IR/IRBuilder.h" 77 #include "llvm/IR/InlineAsm.h" 78 #include "llvm/IR/Instruction.h" 79 #include "llvm/IR/Instructions.h" 80 #include "llvm/IR/IntrinsicInst.h" 81 #include "llvm/IR/Intrinsics.h" 82 #include "llvm/IR/Module.h" 83 #include "llvm/IR/Type.h" 84 #include "llvm/IR/User.h" 85 #include "llvm/IR/Value.h" 86 #include "llvm/MC/MCInstrDesc.h" 87 #include "llvm/MC/MCInstrItineraries.h" 88 #include "llvm/MC/MCRegisterInfo.h" 89 #include "llvm/MC/MCSchedule.h" 90 #include "llvm/Support/AtomicOrdering.h" 91 #include "llvm/Support/BranchProbability.h" 92 #include "llvm/Support/Casting.h" 93 #include "llvm/Support/CodeGen.h" 94 #include "llvm/Support/CommandLine.h" 95 #include "llvm/Support/Compiler.h" 96 #include "llvm/Support/Debug.h" 97 #include "llvm/Support/ErrorHandling.h" 98 #include "llvm/Support/KnownBits.h" 99 #include "llvm/Support/MachineValueType.h" 100 #include "llvm/Support/MathExtras.h" 101 #include "llvm/Support/raw_ostream.h" 102 #include "llvm/Target/TargetMachine.h" 103 #include "llvm/Target/TargetOptions.h" 104 #include 
<algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-isel"

// Pass-level statistics, reported under -stats.
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

// Debugging knob: interworking is on by default and should only be disabled
// when diagnosing problems.
static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
                cl::desc("Enable / disable ARM interworking (for debugging only)"),
                cl::init(true));

// Knobs controlling promotion of unnamed_addr constants into constant pools.
// EnableConstpoolPromotion gates the feature; the two Max options bound the
// size of a single promoted constant and the total promoted per pool.
static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

/// Configure lowering actions for the NEON vector type \p VT.
///
/// Loads and stores of \p VT are promoted (bitconverted) to
/// \p PromotedLdStVT, and the bitwise ops AND/OR/XOR are promoted to
/// \p PromotedBitwiseVT when \p VT is an integer type other than that one.
/// The remaining actions are selected per element type; operations NEON
/// cannot perform (divide/remainder, wide/narrow FP conversions, etc.) are
/// expanded or custom-lowered.
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  // Handle loads/stores of VT via the promoted type's load/store patterns.
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  // Every element type except f64 gets a custom SETCC lowering.
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  // int<->FP conversions are custom-lowered only for i32 lanes; all other
  // element widths are expanded.
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  // Integer vector shifts are custom-lowered.
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  // ABS and the min/max family are legal for integer vectors, except the
  // 64-bit-element types v1i64/v2i64.
  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
}

/// Register \p VT as a legal 64-bit NEON type backed by the DPR (D-register)
/// class, and install its NEON operation actions.
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

/// Register \p VT as a legal 128-bit NEON type backed by the DPair
/// (Q-register) class, and install its NEON operation actions.
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? 
CallingConv::ARM_AAPCS_VFP 238 : CallingConv::ARM_AAPCS); 239 } 240 241 if (Subtarget->isTargetMachO()) { 242 // Uses VFP for Thumb libfuncs if available. 243 if (Subtarget->isThumb() && Subtarget->hasVFP2() && 244 Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) { 245 static const struct { 246 const RTLIB::Libcall Op; 247 const char * const Name; 248 const ISD::CondCode Cond; 249 } LibraryCalls[] = { 250 // Single-precision floating-point arithmetic. 251 { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID }, 252 { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID }, 253 { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID }, 254 { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID }, 255 256 // Double-precision floating-point arithmetic. 257 { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID }, 258 { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID }, 259 { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID }, 260 { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID }, 261 262 // Single-precision comparisons. 263 { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE }, 264 { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE }, 265 { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE }, 266 { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE }, 267 { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE }, 268 { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE }, 269 { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE }, 270 { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ }, 271 272 // Double-precision comparisons. 273 { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE }, 274 { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE }, 275 { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE }, 276 { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE }, 277 { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE }, 278 { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE }, 279 { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE }, 280 { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ }, 281 282 // Floating-point to integer conversions. 
283 // i64 conversions are done via library routines even when generating VFP 284 // instructions, so use the same ones. 285 { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID }, 286 { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID }, 287 { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID }, 288 { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID }, 289 290 // Conversions between floating types. 291 { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID }, 292 { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID }, 293 294 // Integer to floating-point conversions. 295 // i64 conversions are done via library routines even when generating VFP 296 // instructions, so use the same ones. 297 // FIXME: There appears to be some naming inconsistency in ARM libgcc: 298 // e.g., __floatunsidf vs. __floatunssidfvfp. 299 { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID }, 300 { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID }, 301 { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID }, 302 { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID }, 303 }; 304 305 for (const auto &LC : LibraryCalls) { 306 setLibcallName(LC.Op, LC.Name); 307 if (LC.Cond != ISD::SETCC_INVALID) 308 setCmpLibcallCC(LC.Op, LC.Cond); 309 } 310 } 311 } 312 313 // These libcalls are not available in 32-bit. 
314 setLibcallName(RTLIB::SHL_I128, nullptr); 315 setLibcallName(RTLIB::SRL_I128, nullptr); 316 setLibcallName(RTLIB::SRA_I128, nullptr); 317 318 // RTLIB 319 if (Subtarget->isAAPCS_ABI() && 320 (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() || 321 Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) { 322 static const struct { 323 const RTLIB::Libcall Op; 324 const char * const Name; 325 const CallingConv::ID CC; 326 const ISD::CondCode Cond; 327 } LibraryCalls[] = { 328 // Double-precision floating-point arithmetic helper functions 329 // RTABI chapter 4.1.2, Table 2 330 { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 331 { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 332 { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 333 { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 334 335 // Double-precision floating-point comparison helper functions 336 // RTABI chapter 4.1.2, Table 3 337 { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, 338 { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, 339 { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, 340 { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, 341 { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, 342 { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, 343 { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, 344 { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ }, 345 346 // Single-precision floating-point arithmetic helper functions 347 // RTABI chapter 4.1.2, Table 4 348 { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 349 { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 350 { RTLIB::MUL_F32, "__aeabi_fmul", 
CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 351 { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 352 353 // Single-precision floating-point comparison helper functions 354 // RTABI chapter 4.1.2, Table 5 355 { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE }, 356 { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ }, 357 { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE }, 358 { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE }, 359 { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE }, 360 { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE }, 361 { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE }, 362 { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ }, 363 364 // Floating-point to integer conversions. 365 // RTABI chapter 4.1.2, Table 6 366 { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 367 { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 368 { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 369 { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 370 { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 371 { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 372 { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 373 { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 374 375 // Conversions between floating types. 
376 // RTABI chapter 4.1.2, Table 7 377 { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 378 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 379 { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 380 381 // Integer to floating-point conversions. 382 // RTABI chapter 4.1.2, Table 8 383 { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 384 { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 385 { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 386 { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 387 { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 388 { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 389 { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 390 { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 391 392 // Long long helper functions 393 // RTABI chapter 4.2, Table 9 394 { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 395 { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 396 { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 397 { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 398 399 // Integer division functions 400 // RTABI chapter 4.3.1 401 { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 402 { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 403 { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 404 { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 405 { RTLIB::UDIV_I8, "__aeabi_uidiv", 
CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 406 { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 407 { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 408 { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 409 }; 410 411 for (const auto &LC : LibraryCalls) { 412 setLibcallName(LC.Op, LC.Name); 413 setLibcallCallingConv(LC.Op, LC.CC); 414 if (LC.Cond != ISD::SETCC_INVALID) 415 setCmpLibcallCC(LC.Op, LC.Cond); 416 } 417 418 // EABI dependent RTLIB 419 if (TM.Options.EABIVersion == EABI::EABI4 || 420 TM.Options.EABIVersion == EABI::EABI5) { 421 static const struct { 422 const RTLIB::Libcall Op; 423 const char *const Name; 424 const CallingConv::ID CC; 425 const ISD::CondCode Cond; 426 } MemOpsLibraryCalls[] = { 427 // Memory operations 428 // RTABI chapter 4.3.4 429 { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 430 { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 431 { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID }, 432 }; 433 434 for (const auto &LC : MemOpsLibraryCalls) { 435 setLibcallName(LC.Op, LC.Name); 436 setLibcallCallingConv(LC.Op, LC.CC); 437 if (LC.Cond != ISD::SETCC_INVALID) 438 setCmpLibcallCC(LC.Op, LC.Cond); 439 } 440 } 441 } 442 443 if (Subtarget->isTargetWindows()) { 444 static const struct { 445 const RTLIB::Libcall Op; 446 const char * const Name; 447 const CallingConv::ID CC; 448 } LibraryCalls[] = { 449 { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP }, 450 { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP }, 451 { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP }, 452 { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP }, 453 { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP }, 454 { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP }, 455 { 
RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP }, 456 { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP }, 457 }; 458 459 for (const auto &LC : LibraryCalls) { 460 setLibcallName(LC.Op, LC.Name); 461 setLibcallCallingConv(LC.Op, LC.CC); 462 } 463 } 464 465 // Use divmod compiler-rt calls for iOS 5.0 and later. 466 if (Subtarget->isTargetMachO() && 467 !(Subtarget->isTargetIOS() && 468 Subtarget->getTargetTriple().isOSVersionLT(5, 0))) { 469 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4"); 470 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4"); 471 } 472 473 // The half <-> float conversion functions are always soft-float on 474 // non-watchos platforms, but are needed for some targets which use a 475 // hard-float calling convention by default. 476 if (!Subtarget->isTargetWatchABI()) { 477 if (Subtarget->isAAPCS_ABI()) { 478 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS); 479 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS); 480 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS); 481 } else { 482 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS); 483 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS); 484 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS); 485 } 486 } 487 488 // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have 489 // a __gnu_ prefix (which is the default). 
490 if (Subtarget->isTargetAEABI()) { 491 static const struct { 492 const RTLIB::Libcall Op; 493 const char * const Name; 494 const CallingConv::ID CC; 495 } LibraryCalls[] = { 496 { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS }, 497 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS }, 498 { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS }, 499 }; 500 501 for (const auto &LC : LibraryCalls) { 502 setLibcallName(LC.Op, LC.Name); 503 setLibcallCallingConv(LC.Op, LC.CC); 504 } 505 } 506 507 if (Subtarget->isThumb1Only()) 508 addRegisterClass(MVT::i32, &ARM::tGPRRegClass); 509 else 510 addRegisterClass(MVT::i32, &ARM::GPRRegClass); 511 512 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && 513 !Subtarget->isThumb1Only()) { 514 addRegisterClass(MVT::f32, &ARM::SPRRegClass); 515 addRegisterClass(MVT::f64, &ARM::DPRRegClass); 516 } 517 518 if (Subtarget->hasFullFP16()) { 519 addRegisterClass(MVT::f16, &ARM::HPRRegClass); 520 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 521 setOperationAction(ISD::BITCAST, MVT::i32, Custom); 522 setOperationAction(ISD::BITCAST, MVT::f16, Custom); 523 524 setOperationAction(ISD::FMINNUM, MVT::f16, Legal); 525 setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); 526 } 527 528 for (MVT VT : MVT::vector_valuetypes()) { 529 for (MVT InnerVT : MVT::vector_valuetypes()) { 530 setTruncStoreAction(VT, InnerVT, Expand); 531 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); 532 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); 533 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); 534 } 535 536 setOperationAction(ISD::MULHS, VT, Expand); 537 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 538 setOperationAction(ISD::MULHU, VT, Expand); 539 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 540 541 setOperationAction(ISD::BSWAP, VT, Expand); 542 } 543 544 setOperationAction(ISD::ConstantFP, MVT::f32, Custom); 545 setOperationAction(ISD::ConstantFP, MVT::f64, Custom); 546 547 
setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom); 548 setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom); 549 550 if (Subtarget->hasNEON()) { 551 addDRTypeForNEON(MVT::v2f32); 552 addDRTypeForNEON(MVT::v8i8); 553 addDRTypeForNEON(MVT::v4i16); 554 addDRTypeForNEON(MVT::v2i32); 555 addDRTypeForNEON(MVT::v1i64); 556 557 addQRTypeForNEON(MVT::v4f32); 558 addQRTypeForNEON(MVT::v2f64); 559 addQRTypeForNEON(MVT::v16i8); 560 addQRTypeForNEON(MVT::v8i16); 561 addQRTypeForNEON(MVT::v4i32); 562 addQRTypeForNEON(MVT::v2i64); 563 564 if (Subtarget->hasFullFP16()) { 565 addQRTypeForNEON(MVT::v8f16); 566 addDRTypeForNEON(MVT::v4f16); 567 } 568 569 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but 570 // neither Neon nor VFP support any arithmetic operations on it. 571 // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively 572 // supported for v4f32. 573 setOperationAction(ISD::FADD, MVT::v2f64, Expand); 574 setOperationAction(ISD::FSUB, MVT::v2f64, Expand); 575 setOperationAction(ISD::FMUL, MVT::v2f64, Expand); 576 // FIXME: Code duplication: FDIV and FREM are expanded always, see 577 // ARMTargetLowering::addTypeForNEON method for details. 578 setOperationAction(ISD::FDIV, MVT::v2f64, Expand); 579 setOperationAction(ISD::FREM, MVT::v2f64, Expand); 580 // FIXME: Create unittest. 581 // In another words, find a way when "copysign" appears in DAG with vector 582 // operands. 583 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand); 584 // FIXME: Code duplication: SETCC has custom operation action, see 585 // ARMTargetLowering::addTypeForNEON method for details. 586 setOperationAction(ISD::SETCC, MVT::v2f64, Expand); 587 // FIXME: Create unittest for FNEG and for FABS. 
588 setOperationAction(ISD::FNEG, MVT::v2f64, Expand); 589 setOperationAction(ISD::FABS, MVT::v2f64, Expand); 590 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand); 591 setOperationAction(ISD::FSIN, MVT::v2f64, Expand); 592 setOperationAction(ISD::FCOS, MVT::v2f64, Expand); 593 setOperationAction(ISD::FPOW, MVT::v2f64, Expand); 594 setOperationAction(ISD::FLOG, MVT::v2f64, Expand); 595 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand); 596 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand); 597 setOperationAction(ISD::FEXP, MVT::v2f64, Expand); 598 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand); 599 // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR. 600 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand); 601 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand); 602 setOperationAction(ISD::FRINT, MVT::v2f64, Expand); 603 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand); 604 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand); 605 setOperationAction(ISD::FMA, MVT::v2f64, Expand); 606 607 setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); 608 setOperationAction(ISD::FSIN, MVT::v4f32, Expand); 609 setOperationAction(ISD::FCOS, MVT::v4f32, Expand); 610 setOperationAction(ISD::FPOW, MVT::v4f32, Expand); 611 setOperationAction(ISD::FLOG, MVT::v4f32, Expand); 612 setOperationAction(ISD::FLOG2, MVT::v4f32, Expand); 613 setOperationAction(ISD::FLOG10, MVT::v4f32, Expand); 614 setOperationAction(ISD::FEXP, MVT::v4f32, Expand); 615 setOperationAction(ISD::FEXP2, MVT::v4f32, Expand); 616 setOperationAction(ISD::FCEIL, MVT::v4f32, Expand); 617 setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand); 618 setOperationAction(ISD::FRINT, MVT::v4f32, Expand); 619 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand); 620 setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand); 621 622 // Mark v2f32 intrinsics. 
623 setOperationAction(ISD::FSQRT, MVT::v2f32, Expand); 624 setOperationAction(ISD::FSIN, MVT::v2f32, Expand); 625 setOperationAction(ISD::FCOS, MVT::v2f32, Expand); 626 setOperationAction(ISD::FPOW, MVT::v2f32, Expand); 627 setOperationAction(ISD::FLOG, MVT::v2f32, Expand); 628 setOperationAction(ISD::FLOG2, MVT::v2f32, Expand); 629 setOperationAction(ISD::FLOG10, MVT::v2f32, Expand); 630 setOperationAction(ISD::FEXP, MVT::v2f32, Expand); 631 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand); 632 setOperationAction(ISD::FCEIL, MVT::v2f32, Expand); 633 setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand); 634 setOperationAction(ISD::FRINT, MVT::v2f32, Expand); 635 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand); 636 setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand); 637 638 // Neon does not support some operations on v1i64 and v2i64 types. 639 setOperationAction(ISD::MUL, MVT::v1i64, Expand); 640 // Custom handling for some quad-vector types to detect VMULL. 641 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 642 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 643 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 644 // Custom handling for some vector types to avoid expensive expansions 645 setOperationAction(ISD::SDIV, MVT::v4i16, Custom); 646 setOperationAction(ISD::SDIV, MVT::v8i8, Custom); 647 setOperationAction(ISD::UDIV, MVT::v4i16, Custom); 648 setOperationAction(ISD::UDIV, MVT::v8i8, Custom); 649 // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with 650 // a destination type that is wider than the source, and nor does 651 // it have a FP_TO_[SU]INT instruction with a narrower destination than 652 // source. 
653 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom); 654 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom); 655 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); 656 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); 657 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom); 658 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom); 659 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom); 660 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); 661 662 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 663 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand); 664 665 // NEON does not have single instruction CTPOP for vectors with element 666 // types wider than 8-bits. However, custom lowering can leverage the 667 // v8i8/v16i8 vcnt instruction. 668 setOperationAction(ISD::CTPOP, MVT::v2i32, Custom); 669 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom); 670 setOperationAction(ISD::CTPOP, MVT::v4i16, Custom); 671 setOperationAction(ISD::CTPOP, MVT::v8i16, Custom); 672 setOperationAction(ISD::CTPOP, MVT::v1i64, Custom); 673 setOperationAction(ISD::CTPOP, MVT::v2i64, Custom); 674 675 setOperationAction(ISD::CTLZ, MVT::v1i64, Expand); 676 setOperationAction(ISD::CTLZ, MVT::v2i64, Expand); 677 678 // NEON does not have single instruction CTTZ for vectors. 
679 setOperationAction(ISD::CTTZ, MVT::v8i8, Custom); 680 setOperationAction(ISD::CTTZ, MVT::v4i16, Custom); 681 setOperationAction(ISD::CTTZ, MVT::v2i32, Custom); 682 setOperationAction(ISD::CTTZ, MVT::v1i64, Custom); 683 684 setOperationAction(ISD::CTTZ, MVT::v16i8, Custom); 685 setOperationAction(ISD::CTTZ, MVT::v8i16, Custom); 686 setOperationAction(ISD::CTTZ, MVT::v4i32, Custom); 687 setOperationAction(ISD::CTTZ, MVT::v2i64, Custom); 688 689 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom); 690 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom); 691 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom); 692 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom); 693 694 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom); 695 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom); 696 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom); 697 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom); 698 699 // NEON only has FMA instructions as of VFP4. 700 if (!Subtarget->hasVFP4()) { 701 setOperationAction(ISD::FMA, MVT::v2f32, Expand); 702 setOperationAction(ISD::FMA, MVT::v4f32, Expand); 703 } 704 705 setTargetDAGCombine(ISD::INTRINSIC_VOID); 706 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 707 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 708 setTargetDAGCombine(ISD::SHL); 709 setTargetDAGCombine(ISD::SRL); 710 setTargetDAGCombine(ISD::SRA); 711 setTargetDAGCombine(ISD::SIGN_EXTEND); 712 setTargetDAGCombine(ISD::ZERO_EXTEND); 713 setTargetDAGCombine(ISD::ANY_EXTEND); 714 setTargetDAGCombine(ISD::BUILD_VECTOR); 715 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 716 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); 717 setTargetDAGCombine(ISD::STORE); 718 setTargetDAGCombine(ISD::FP_TO_SINT); 719 setTargetDAGCombine(ISD::FP_TO_UINT); 720 setTargetDAGCombine(ISD::FDIV); 721 setTargetDAGCombine(ISD::LOAD); 722 723 // It is legal to extload from v4i8 to v4i16 or v4i32. 
724 for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16, 725 MVT::v2i32}) { 726 for (MVT VT : MVT::integer_vector_valuetypes()) { 727 setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal); 728 setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal); 729 setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal); 730 } 731 } 732 } 733 734 if (Subtarget->isFPOnlySP()) { 735 // When targeting a floating-point unit with only single-precision 736 // operations, f64 is legal for the few double-precision instructions which 737 // are present However, no double-precision operations other than moves, 738 // loads and stores are provided by the hardware. 739 setOperationAction(ISD::FADD, MVT::f64, Expand); 740 setOperationAction(ISD::FSUB, MVT::f64, Expand); 741 setOperationAction(ISD::FMUL, MVT::f64, Expand); 742 setOperationAction(ISD::FMA, MVT::f64, Expand); 743 setOperationAction(ISD::FDIV, MVT::f64, Expand); 744 setOperationAction(ISD::FREM, MVT::f64, Expand); 745 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 746 setOperationAction(ISD::FGETSIGN, MVT::f64, Expand); 747 setOperationAction(ISD::FNEG, MVT::f64, Expand); 748 setOperationAction(ISD::FABS, MVT::f64, Expand); 749 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 750 setOperationAction(ISD::FSIN, MVT::f64, Expand); 751 setOperationAction(ISD::FCOS, MVT::f64, Expand); 752 setOperationAction(ISD::FPOW, MVT::f64, Expand); 753 setOperationAction(ISD::FLOG, MVT::f64, Expand); 754 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 755 setOperationAction(ISD::FLOG10, MVT::f64, Expand); 756 setOperationAction(ISD::FEXP, MVT::f64, Expand); 757 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 758 setOperationAction(ISD::FCEIL, MVT::f64, Expand); 759 setOperationAction(ISD::FTRUNC, MVT::f64, Expand); 760 setOperationAction(ISD::FRINT, MVT::f64, Expand); 761 setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand); 762 setOperationAction(ISD::FFLOOR, MVT::f64, Expand); 763 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 
764 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 765 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 766 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 767 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom); 768 setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom); 769 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom); 770 setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); 771 } 772 773 computeRegisterProperties(Subtarget->getRegisterInfo()); 774 775 // ARM does not have floating-point extending loads. 776 for (MVT VT : MVT::fp_valuetypes()) { 777 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); 778 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); 779 } 780 781 // ... or truncating stores 782 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 783 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 784 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 785 786 // ARM does not have i1 sign extending load. 787 for (MVT VT : MVT::integer_valuetypes()) 788 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 789 790 // ARM supports all 4 flavors of integer indexed load / store. 791 if (!Subtarget->isThumb1Only()) { 792 for (unsigned im = (unsigned)ISD::PRE_INC; 793 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 794 setIndexedLoadAction(im, MVT::i1, Legal); 795 setIndexedLoadAction(im, MVT::i8, Legal); 796 setIndexedLoadAction(im, MVT::i16, Legal); 797 setIndexedLoadAction(im, MVT::i32, Legal); 798 setIndexedStoreAction(im, MVT::i1, Legal); 799 setIndexedStoreAction(im, MVT::i8, Legal); 800 setIndexedStoreAction(im, MVT::i16, Legal); 801 setIndexedStoreAction(im, MVT::i32, Legal); 802 } 803 } else { 804 // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}. 
805 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal); 806 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal); 807 } 808 809 setOperationAction(ISD::SADDO, MVT::i32, Custom); 810 setOperationAction(ISD::UADDO, MVT::i32, Custom); 811 setOperationAction(ISD::SSUBO, MVT::i32, Custom); 812 setOperationAction(ISD::USUBO, MVT::i32, Custom); 813 814 setOperationAction(ISD::ADDCARRY, MVT::i32, Custom); 815 setOperationAction(ISD::SUBCARRY, MVT::i32, Custom); 816 817 // i64 operation support. 818 setOperationAction(ISD::MUL, MVT::i64, Expand); 819 setOperationAction(ISD::MULHU, MVT::i32, Expand); 820 if (Subtarget->isThumb1Only()) { 821 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 822 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 823 } 824 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops() 825 || (Subtarget->isThumb2() && !Subtarget->hasDSP())) 826 setOperationAction(ISD::MULHS, MVT::i32, Expand); 827 828 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 829 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 830 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 831 setOperationAction(ISD::SRL, MVT::i64, Custom); 832 setOperationAction(ISD::SRA, MVT::i64, Custom); 833 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); 834 835 // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1. 836 if (Subtarget->isThumb1Only()) { 837 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 838 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 839 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 840 } 841 842 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) 843 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 844 845 // ARM does not have ROTL. 
846 setOperationAction(ISD::ROTL, MVT::i32, Expand); 847 for (MVT VT : MVT::vector_valuetypes()) { 848 setOperationAction(ISD::ROTL, VT, Expand); 849 setOperationAction(ISD::ROTR, VT, Expand); 850 } 851 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 852 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 853 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) { 854 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 855 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall); 856 } 857 858 // @llvm.readcyclecounter requires the Performance Monitors extension. 859 // Default to the 0 expansion on unsupported platforms. 860 // FIXME: Technically there are older ARM CPUs that have 861 // implementation-specific ways of obtaining this information. 862 if (Subtarget->hasPerfMon()) 863 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom); 864 865 // Only ARMv6 has BSWAP. 866 if (!Subtarget->hasV6Ops()) 867 setOperationAction(ISD::BSWAP, MVT::i32, Expand); 868 869 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode() 870 : Subtarget->hasDivideInARMMode(); 871 if (!hasDivide) { 872 // These are expanded into libcalls if the cpu doesn't have HW divider. 
873 setOperationAction(ISD::SDIV, MVT::i32, LibCall); 874 setOperationAction(ISD::UDIV, MVT::i32, LibCall); 875 } 876 877 if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) { 878 setOperationAction(ISD::SDIV, MVT::i32, Custom); 879 setOperationAction(ISD::UDIV, MVT::i32, Custom); 880 881 setOperationAction(ISD::SDIV, MVT::i64, Custom); 882 setOperationAction(ISD::UDIV, MVT::i64, Custom); 883 } 884 885 setOperationAction(ISD::SREM, MVT::i32, Expand); 886 setOperationAction(ISD::UREM, MVT::i32, Expand); 887 888 // Register based DivRem for AEABI (RTABI 4.2) 889 if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || 890 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() || 891 Subtarget->isTargetWindows()) { 892 setOperationAction(ISD::SREM, MVT::i64, Custom); 893 setOperationAction(ISD::UREM, MVT::i64, Custom); 894 HasStandaloneRem = false; 895 896 if (Subtarget->isTargetWindows()) { 897 const struct { 898 const RTLIB::Libcall Op; 899 const char * const Name; 900 const CallingConv::ID CC; 901 } LibraryCalls[] = { 902 { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS }, 903 { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS }, 904 { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS }, 905 { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS }, 906 907 { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS }, 908 { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS }, 909 { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS }, 910 { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS }, 911 }; 912 913 for (const auto &LC : LibraryCalls) { 914 setLibcallName(LC.Op, LC.Name); 915 setLibcallCallingConv(LC.Op, LC.CC); 916 } 917 } else { 918 const struct { 919 const RTLIB::Libcall Op; 920 const char * const Name; 921 const CallingConv::ID CC; 922 } LibraryCalls[] = { 923 { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, 924 { RTLIB::SDIVREM_I16, "__aeabi_idivmod", 
CallingConv::ARM_AAPCS }, 925 { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS }, 926 { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS }, 927 928 { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 929 { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 930 { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS }, 931 { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS }, 932 }; 933 934 for (const auto &LC : LibraryCalls) { 935 setLibcallName(LC.Op, LC.Name); 936 setLibcallCallingConv(LC.Op, LC.CC); 937 } 938 } 939 940 setOperationAction(ISD::SDIVREM, MVT::i32, Custom); 941 setOperationAction(ISD::UDIVREM, MVT::i32, Custom); 942 setOperationAction(ISD::SDIVREM, MVT::i64, Custom); 943 setOperationAction(ISD::UDIVREM, MVT::i64, Custom); 944 } else { 945 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 946 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 947 } 948 949 if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT()) 950 for (auto &VT : {MVT::f32, MVT::f64}) 951 setOperationAction(ISD::FPOWI, VT, Custom); 952 953 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 954 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 955 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 956 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 957 958 setOperationAction(ISD::TRAP, MVT::Other, Legal); 959 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 960 961 // Use the default implementation. 
962 setOperationAction(ISD::VASTART, MVT::Other, Custom); 963 setOperationAction(ISD::VAARG, MVT::Other, Expand); 964 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 965 setOperationAction(ISD::VAEND, MVT::Other, Expand); 966 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 967 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 968 969 if (Subtarget->isTargetWindows()) 970 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 971 else 972 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand); 973 974 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use 975 // the default expansion. 976 InsertFencesForAtomic = false; 977 if (Subtarget->hasAnyDataBarrier() && 978 (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) { 979 // ATOMIC_FENCE needs custom lowering; the others should have been expanded 980 // to ldrex/strex loops already. 981 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); 982 if (!Subtarget->isThumb() || !Subtarget->isMClass()) 983 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); 984 985 // On v8, we have particularly efficient implementations of atomic fences 986 // if they can be combined with nearby atomic loads and stores. 987 if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) { 988 // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc. 989 InsertFencesForAtomic = true; 990 } 991 } else { 992 // If there's anything we can use as a barrier, go through custom lowering 993 // for ATOMIC_FENCE. 994 // If target has DMB in thumb, Fences can be inserted. 995 if (Subtarget->hasDataBarrier()) 996 InsertFencesForAtomic = true; 997 998 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, 999 Subtarget->hasAnyDataBarrier() ? Custom : Expand); 1000 1001 // Set them all for expansion, which will force libcalls. 
1002 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); 1003 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); 1004 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); 1005 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); 1006 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); 1007 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); 1008 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); 1009 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); 1010 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); 1011 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); 1012 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); 1013 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); 1014 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the 1015 // Unordered/Monotonic case. 1016 if (!InsertFencesForAtomic) { 1017 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); 1018 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); 1019 } 1020 } 1021 1022 setOperationAction(ISD::PREFETCH, MVT::Other, Custom); 1023 1024 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes. 1025 if (!Subtarget->hasV6Ops()) { 1026 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 1027 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 1028 } 1029 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 1030 1031 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && 1032 !Subtarget->isThumb1Only()) { 1033 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR 1034 // iff target supports vfp2. 1035 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 1036 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 1037 } 1038 1039 // We want to custom lower some of our intrinsics. 
1040 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1041 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 1042 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 1043 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom); 1044 if (Subtarget->useSjLjEH()) 1045 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume"); 1046 1047 setOperationAction(ISD::SETCC, MVT::i32, Expand); 1048 setOperationAction(ISD::SETCC, MVT::f32, Expand); 1049 setOperationAction(ISD::SETCC, MVT::f64, Expand); 1050 setOperationAction(ISD::SELECT, MVT::i32, Custom); 1051 setOperationAction(ISD::SELECT, MVT::f32, Custom); 1052 setOperationAction(ISD::SELECT, MVT::f64, Custom); 1053 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 1054 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 1055 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 1056 if (Subtarget->hasFullFP16()) { 1057 setOperationAction(ISD::SETCC, MVT::f16, Expand); 1058 setOperationAction(ISD::SELECT, MVT::f16, Custom); 1059 setOperationAction(ISD::SELECT_CC, MVT::f16, Custom); 1060 } 1061 1062 setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom); 1063 1064 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 1065 setOperationAction(ISD::BR_CC, MVT::i32, Custom); 1066 if (Subtarget->hasFullFP16()) 1067 setOperationAction(ISD::BR_CC, MVT::f16, Custom); 1068 setOperationAction(ISD::BR_CC, MVT::f32, Custom); 1069 setOperationAction(ISD::BR_CC, MVT::f64, Custom); 1070 setOperationAction(ISD::BR_JT, MVT::Other, Custom); 1071 1072 // We don't support sin/cos/fmod/copysign/pow 1073 setOperationAction(ISD::FSIN, MVT::f64, Expand); 1074 setOperationAction(ISD::FSIN, MVT::f32, Expand); 1075 setOperationAction(ISD::FCOS, MVT::f32, Expand); 1076 setOperationAction(ISD::FCOS, MVT::f64, Expand); 1077 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 1078 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 1079 setOperationAction(ISD::FREM, MVT::f64, Expand); 1080 
setOperationAction(ISD::FREM, MVT::f32, Expand); 1081 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && 1082 !Subtarget->isThumb1Only()) { 1083 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 1084 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 1085 } 1086 setOperationAction(ISD::FPOW, MVT::f64, Expand); 1087 setOperationAction(ISD::FPOW, MVT::f32, Expand); 1088 1089 if (!Subtarget->hasVFP4()) { 1090 setOperationAction(ISD::FMA, MVT::f64, Expand); 1091 setOperationAction(ISD::FMA, MVT::f32, Expand); 1092 } 1093 1094 // Various VFP goodness 1095 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { 1096 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. 1097 if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) { 1098 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 1099 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); 1100 } 1101 1102 // fp16 is a special v7 extension that adds f16 <-> f32 conversions. 1103 if (!Subtarget->hasFP16()) { 1104 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); 1105 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); 1106 } 1107 } 1108 1109 // Use __sincos_stret if available. 1110 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && 1111 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { 1112 setOperationAction(ISD::FSINCOS, MVT::f64, Custom); 1113 setOperationAction(ISD::FSINCOS, MVT::f32, Custom); 1114 } 1115 1116 // FP-ARMv8 implements a lot of rounding-like FP operations. 
1117 if (Subtarget->hasFPARMv8()) { 1118 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 1119 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 1120 setOperationAction(ISD::FROUND, MVT::f32, Legal); 1121 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 1122 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 1123 setOperationAction(ISD::FRINT, MVT::f32, Legal); 1124 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 1125 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 1126 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal); 1127 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal); 1128 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); 1129 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); 1130 1131 if (!Subtarget->isFPOnlySP()) { 1132 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 1133 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 1134 setOperationAction(ISD::FROUND, MVT::f64, Legal); 1135 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 1136 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 1137 setOperationAction(ISD::FRINT, MVT::f64, Legal); 1138 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 1139 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 1140 } 1141 } 1142 1143 if (Subtarget->hasNEON()) { 1144 // vmin and vmax aren't available in a scalar form, so we use 1145 // a NEON instruction with an undef lane instead. 
1146 setOperationAction(ISD::FMINIMUM, MVT::f16, Legal); 1147 setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal); 1148 setOperationAction(ISD::FMINIMUM, MVT::f32, Legal); 1149 setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal); 1150 setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal); 1151 setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal); 1152 setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal); 1153 setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal); 1154 1155 if (Subtarget->hasFullFP16()) { 1156 setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal); 1157 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal); 1158 setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal); 1159 setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal); 1160 1161 setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal); 1162 setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal); 1163 setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal); 1164 setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal); 1165 } 1166 } 1167 1168 // We have target-specific dag combine patterns for the following nodes: 1169 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine 1170 setTargetDAGCombine(ISD::ADD); 1171 setTargetDAGCombine(ISD::SUB); 1172 setTargetDAGCombine(ISD::MUL); 1173 setTargetDAGCombine(ISD::AND); 1174 setTargetDAGCombine(ISD::OR); 1175 setTargetDAGCombine(ISD::XOR); 1176 1177 if (Subtarget->hasV6Ops()) 1178 setTargetDAGCombine(ISD::SRL); 1179 1180 setStackPointerRegisterToSaveRestore(ARM::SP); 1181 1182 if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || 1183 !Subtarget->hasVFP2()) 1184 setSchedulingPreference(Sched::RegPressure); 1185 else 1186 setSchedulingPreference(Sched::Hybrid); 1187 1188 //// temporary - rewrite interface to use type 1189 MaxStoresPerMemset = 8; 1190 MaxStoresPerMemsetOptSize = 4; 1191 MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores 1192 MaxStoresPerMemcpyOptSize = 2; 1193 MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores 1194 
  MaxStoresPerMemmoveOptSize = 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();

  setPrefLoopAlignment(Subtarget->getPrefLoopAlignment());

  // Values are log2 of the byte alignment: Thumb functions may start on
  // 2-byte boundaries, ARM functions on 4-byte boundaries.
  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// Forward the subtarget's soft-float setting.
bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    // 128-bit types occupy a pair of D registers, hence cost 2.
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    // Pseudo 256-bit type: four consecutive D registers (see getRegClassFor).
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    // Pseudo 512-bit type: eight consecutive D registers (see getRegClassFor).
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

/// getTargetNodeName - Return the human-readable name of the given
/// target-specific ARMISD node, or nullptr if the opcode is not recognized.
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  // FIRST_NUMBER is a marker, not a real node; fall through to nullptr.
  case ARMISD::FIRST_NUMBER: break;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::SSAT: return "ARMISD::SSAT";
  case ARMISD::USAT: return "ARMISD::USAT";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
  case ARMISD::VMOVhr: return "ARMISD::VMOVhr";
  case ARMISD::VMOVrh: return "ARMISD::VMOVrh";
  case ARMISD::VMOVSR: return "ARMISD::VMOVSR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VSLI: return "ARMISD::VSLI";
  case ARMISD::VSRI: return "ARMISD::VSRI";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::UMAAL: return "ARMISD::UMAAL";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::SMLALBB: return "ARMISD::SMLALBB";
  case ARMISD::SMLALBT: return "ARMISD::SMLALBT";
  case ARMISD::SMLALTB: return "ARMISD::SMLALTB";
  case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
  case ARMISD::SMULWB: return "ARMISD::SMULWB";
  case ARMISD::SMULWT: return "ARMISD::SMULWT";
  case ARMISD::SMLALD: return "ARMISD::SMLALD";
  case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
  case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
  case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
  case ARMISD::SMMLAR: return "ARMISD::SMMLAR";
  case ARMISD::SMMLSR: return "ARMISD::SMMLSR";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
  case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  }
  return nullptr;
}

/// Return the value type to use for the result of ISD::SETCC: the pointer
/// type for scalars, and an all-integer vector of the same shape for vectors.
EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// memcpy, and other memory intrinsics, typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                               unsigned &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))
    return false;
  MinSize = 8;
  // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
  // cycle faster than 4-byte aligned LDM.
  PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
  return true;
}

// Create a fast isel object.
1452 FastISel * 1453 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 1454 const TargetLibraryInfo *libInfo) const { 1455 return ARM::createFastISel(funcInfo, libInfo); 1456 } 1457 1458 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { 1459 unsigned NumVals = N->getNumValues(); 1460 if (!NumVals) 1461 return Sched::RegPressure; 1462 1463 for (unsigned i = 0; i != NumVals; ++i) { 1464 EVT VT = N->getValueType(i); 1465 if (VT == MVT::Glue || VT == MVT::Other) 1466 continue; 1467 if (VT.isFloatingPoint() || VT.isVector()) 1468 return Sched::ILP; 1469 } 1470 1471 if (!N->isMachineOpcode()) 1472 return Sched::RegPressure; 1473 1474 // Load are scheduled for latency even if there instruction itinerary 1475 // is not available. 1476 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1477 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode()); 1478 1479 if (MCID.getNumDefs() == 0) 1480 return Sched::RegPressure; 1481 if (!Itins->isEmpty() && 1482 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2) 1483 return Sched::ILP; 1484 1485 return Sched::RegPressure; 1486 } 1487 1488 //===----------------------------------------------------------------------===// 1489 // Lowering Code 1490 //===----------------------------------------------------------------------===// 1491 1492 static bool isSRL16(const SDValue &Op) { 1493 if (Op.getOpcode() != ISD::SRL) 1494 return false; 1495 if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 1496 return Const->getZExtValue() == 16; 1497 return false; 1498 } 1499 1500 static bool isSRA16(const SDValue &Op) { 1501 if (Op.getOpcode() != ISD::SRA) 1502 return false; 1503 if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 1504 return Const->getZExtValue() == 16; 1505 return false; 1506 } 1507 1508 static bool isSHL16(const SDValue &Op) { 1509 if (Op.getOpcode() != ISD::SHL) 1510 return false; 1511 if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 1512 return 
Const->getZExtValue() == 16; 1513 return false; 1514 } 1515 1516 // Check for a signed 16-bit value. We special case SRA because it makes it 1517 // more simple when also looking for SRAs that aren't sign extending a 1518 // smaller value. Without the check, we'd need to take extra care with 1519 // checking order for some operations. 1520 static bool isS16(const SDValue &Op, SelectionDAG &DAG) { 1521 if (isSRA16(Op)) 1522 return isSHL16(Op.getOperand(0)); 1523 return DAG.ComputeNumSignBits(Op) == 17; 1524 } 1525 1526 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC 1527 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { 1528 switch (CC) { 1529 default: llvm_unreachable("Unknown condition code!"); 1530 case ISD::SETNE: return ARMCC::NE; 1531 case ISD::SETEQ: return ARMCC::EQ; 1532 case ISD::SETGT: return ARMCC::GT; 1533 case ISD::SETGE: return ARMCC::GE; 1534 case ISD::SETLT: return ARMCC::LT; 1535 case ISD::SETLE: return ARMCC::LE; 1536 case ISD::SETUGT: return ARMCC::HI; 1537 case ISD::SETUGE: return ARMCC::HS; 1538 case ISD::SETULT: return ARMCC::LO; 1539 case ISD::SETULE: return ARMCC::LS; 1540 } 1541 } 1542 1543 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. 
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) {
  // Defaults: a single ARM condition suffices (AL in CondCode2 means "no
  // second test needed") and the compare is assumed to signal on quiet NaNs;
  // individual cases below override these.
  CondCode2 = ARMCC::AL;
  InvalidOnQNaN = true;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ:
    CondCode = ARMCC::EQ;
    InvalidOnQNaN = false;
    break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE:
    // "Ordered and not equal" needs two checks: less-than OR greater-than.
    CondCode = ARMCC::MI;
    CondCode2 = ARMCC::GT;
    InvalidOnQNaN = false;
    break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ:
    // "Unordered or equal" needs two checks: equal OR unordered (V set).
    CondCode = ARMCC::EQ;
    CondCode2 = ARMCC::VS;
    InvalidOnQNaN = false;
    break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE:
    CondCode = ARMCC::NE;
    InvalidOnQNaN = false;
    break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// getEffectiveCallingConv - Get the effective calling convention, taking into
/// account presence of floating point hardware and calling convention
/// limitations, such as support for variadic functions.
1596 CallingConv::ID 1597 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, 1598 bool isVarArg) const { 1599 switch (CC) { 1600 default: 1601 report_fatal_error("Unsupported calling convention"); 1602 case CallingConv::ARM_AAPCS: 1603 case CallingConv::ARM_APCS: 1604 case CallingConv::GHC: 1605 return CC; 1606 case CallingConv::PreserveMost: 1607 return CallingConv::PreserveMost; 1608 case CallingConv::ARM_AAPCS_VFP: 1609 case CallingConv::Swift: 1610 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; 1611 case CallingConv::C: 1612 if (!Subtarget->isAAPCS_ABI()) 1613 return CallingConv::ARM_APCS; 1614 else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && 1615 getTargetMachine().Options.FloatABIType == FloatABI::Hard && 1616 !isVarArg) 1617 return CallingConv::ARM_AAPCS_VFP; 1618 else 1619 return CallingConv::ARM_AAPCS; 1620 case CallingConv::Fast: 1621 case CallingConv::CXX_FAST_TLS: 1622 if (!Subtarget->isAAPCS_ABI()) { 1623 if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) 1624 return CallingConv::Fast; 1625 return CallingConv::ARM_APCS; 1626 } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) 1627 return CallingConv::ARM_AAPCS_VFP; 1628 else 1629 return CallingConv::ARM_AAPCS; 1630 } 1631 } 1632 1633 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC, 1634 bool isVarArg) const { 1635 return CCAssignFnForNode(CC, false, isVarArg); 1636 } 1637 1638 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC, 1639 bool isVarArg) const { 1640 return CCAssignFnForNode(CC, true, isVarArg); 1641 } 1642 1643 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given 1644 /// CallingConvention. 
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  // Dispatch on the *effective* convention, not the nominal one.
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::Fast:
    return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  case CallingConv::PreserveMost:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue ARMTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));

  // Copy all of the result registers out of their specified physreg.
  // NOTE: 'i' is also advanced inside the loop body for multi-register
  // (custom) locations, so the iteration order is significant.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64: the value lives in a pair of i32
      // registers and is rebuilt with VMOVDRR. Each CopyFromReg threads the
      // chain and glue through to the next one.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      // Big-endian targets swap the register halves.
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        // Second f64 half of a v2f64: two more i32 registers, inserted as
        // lane 1 of the vector.
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      // Simple case: the whole value comes out of one register.
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    // Convert from the location type back to the value type if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags) const {
  // Compute SP + offset of the outgoing argument slot and emit the store.
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}

/// Split an f64 argument into two i32 halves (VMOVRRD) and pass each half in
/// a register, or spill the second half to the stack when it has a memory
/// location. MemOpChains collects any stores produced.
void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {
  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  // Endianness selects which half goes in the first register.
  unsigned id = Subtarget->isLittle() ? 0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    // Materialize SP lazily; it is shared by all memory-operand stores.
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}

/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain.
/// Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  bool isSibCall = false;
  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
    isTailCall = false;

  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, isStructRet, MF.getFunction().hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      isSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (isSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass
  if (!isSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the case
  // of tail call optimization, arguments are handled later.
  // NOTE: 'i' indexes ArgLocs and is advanced extra steps inside the loop for
  // custom (multi-location) arguments; 'realArgIdx' indexes Outs/OutVals.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, dl, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, dl, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      // Recognize a 'returned' first argument to enable the 'this'-return
      // optimization in LowerCallResult.
      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        // Load the register-resident part of the byval word by word.
        EVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     DAG.InferPtrAlignment(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If parameter size outsides register area, "offset" value
        // helps us to calculate stack slot for remained part properly.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      if (Flags.getByValSize() > 4*offset) {
        // Copy the remainder of the byval with a target-specific struct copy.
        auto PtrVT = getPointerTy(DAG.getDataLayout());
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
        SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
                                           MVT::i32);
        SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
                                            MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops));
      }
    } else if (!isSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;

  const TargetMachine &TM = getTargetMachine();
  const Module *Mod = MF.getFunction().getParent();
  const GlobalValue *GV = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    GV = G->getGlobal();
  bool isStub =
      !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();

  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  auto PtrVt = getPointerTy(DAG.getDataLayout());

  if (Subtarget->genLongCalls()) {
    assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
           "long-calls codegen is not position independent!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (isa<GlobalAddressSDNode>(Callee)) {
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
  } else if (isa<GlobalAddressSDNode>(Callee)) {
    // If we're optimizing for minimum size and the function is called three or
    // more times in this block, we can improve codesize by calling indirectly
    // as BLXr has a 16-bit encoding.
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    auto *BB = CLI.CS.getParent();
    bool PreferIndirect =
        Subtarget->isThumb() && MF.getFunction().optForMinSize() &&
        count_if(GV->users(), [&BB](const User *U) {
          return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
        }) > 2;

    if (!PreferIndirect) {
      isDirect = true;
      bool isDef = GV->isStrongDefinitionForLinker();

      // ARM call to a local ARM function is predicable.
      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
      // tBX takes a register source operand.
      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
        assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
        Callee = DAG.getNode(
            ARMISD::WrapperPIC, dl, PtrVt,
            DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
        Callee = DAG.getLoad(
            PtrVt, dl, DAG.getEntryNode(), Callee,
            MachinePointerInfo::getGOT(DAG.getMachineFunction()),
            /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
                                     MachineMemOperand::MOInvariant);
      } else if (Subtarget->isTargetCOFF()) {
        assert(Subtarget->isTargetWindows() &&
               "Windows is the only supported COFF target");
        unsigned TargetFlags = GV->hasDLLImportStorageClass()
                                   ? ARMII::MO_DLLIMPORT
                                   : ARMII::MO_NO_FLAG;
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
                                            TargetFlags);
        // dllimport'd symbols are called through the import table (GOT-like
        // indirection).
        if (GV->hasDLLImportStorageClass())
          Callee =
              DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
                          DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      } else {
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
      }
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
    } else {
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
    }
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = ARMISD::CALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
             // Emit regular call when code size is the priority
             !MF.getFunction().optForMinSize())
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to insure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    unsigned Align) const {
  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Align = std::max(Align, 4U);

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  // Burn registers until the first allocated register satisfies the byval's
  // alignment (AlignInRegs is the alignment measured in 4-byte registers).
  unsigned AlignInRegs = Align / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  // Number of bytes available in the remaining argument registers [Reg, R4).
  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and parameter size greater than size of
  // all remained GPR regs. In that case we can't split parameter, we must
  // send it to stack. We also must set NCRN to R4, so waste all
  // remained registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // First register for byval parameter is the first register that wasn't
  // allocated before this method call, so it would be "reg".
  // If parameter is small enough to be saved in range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs,
  // else parameter would be splitted between registers and stack,
  // end register would be r4 in this case.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note, first register is allocated in the beginning of function already,
  // allocate remained amount of registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;
  // Sentinel: FI stays at max() unless we identify a frame index below.
  int FI = std::numeric_limits<int>::max();
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      // The vreg must have been filled directly from a stack-slot load.
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != std::numeric_limits<int>::max());
  // Must be a fixed object (caller's incoming argument area) with matching
  // offset and size.
  if (!MFI.isFixedObjectIndex(FI))
    return false;
  return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  assert(Subtarget->supportsTailCall());

  // Tail calls to function pointers cannot be optimized for Thumb1 if the args
  // to the call take up r0-r3. The reason is that there are no legal registers
  // left to hold the pointer to the function to be called.
  if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
      !isa<GlobalAddressSDNode>(Callee.getNode()))
    return false;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF.hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Check that the call results are passed in the same way.
  LLVMContext &C = *DAG.getContext();
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  CCAssignFnForReturn(CalleeCC, isVarArg),
                                  CCAssignFnForReturn(CallerCC, isVarArg)))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // If Caller's vararg or byval argument has been split between registers and
  // stack, do not perform tail call, since part of the argument is in caller's
  // local frame.
  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations. The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          // Note: the ++i below deliberately consumes the extra ArgLocs
          // entries belonging to the same split argument.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  return true;
}

/// CanLowerReturn - Return true if the return values described by \p Outs can
/// all be assigned locations by the return-value calling convention for
/// \p CallConv (i.e. the return does not need sret demotion).
bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
}

/// Build an ARMISD::INTRET_FLAG node for a return from an "interrupt"
/// function, inserting the LR adjustment required by the kind of exception
/// being returned from as an extra operand.
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    const SDLoc &DL, SelectionDAG &DAG) {
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function &F = MF.getFunction();

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
  // version of the "preferred return address". These offsets affect the return
  // instruction if this is a return from PL1 without hypervisor extensions.
  //    IRQ/FIQ: +4 "subs pc, lr, #4"
  //    SWI:     0  "subs pc, lr, #0"
  //    ABORT:   +4 "subs pc, lr, #4"
  //    UNDEF:   +4/+2 "subs pc, lr, #0"
  // UNDEF varies depending on where the exception came from ARM or Thumb
  // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.

  int64_t LROffset;
  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
      IntKind == "ABORT")
    LROffset = 4;
  else if (IntKind == "SWI" || IntKind == "UNDEF")
    LROffset = 0;
  else
    report_fatal_error("Unsupported interrupt attribute. If present, value "
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  RetOps.insert(RetOps.begin() + 1,
                DAG.getConstant(LROffset, DL, MVT::i32, false));

  return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}

/// LowerReturn - Lower an IR return into ARMISD::RET_FLAG (or INTRET_FLAG for
/// interrupt handlers), copying each return value into its assigned register
/// and gluing the copies together.
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  bool isLittleEndian = Subtarget->isLittle();

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  AFI->setReturnRegsCount(RVLocs.size());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
      // Half-precision return values can be returned like this:
      //
      // t11 f16 = fadd ...
      // t12: i16 = bitcast t11
      //   t13: i32 = zero_extend t12
      // t14: f32 = bitcast t13  <~~~~~~~ Arg
      //
      // to avoid code generation for bitcasts, we simply set Arg to the node
      // that produces the f16 value, t11 in this case.
      //
      if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
        SDValue ZE = Arg.getOperand(0);
        if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
          SDValue BC = ZE.getOperand(0);
          if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
            Arg = BC.getOperand(0);
            ReturnF16 = true;
          }
        }
      }
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      if (!ReturnF16)
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, dl, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 0 : 1),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 1 : 0),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, dl, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 0 : 1),
                               Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 1 : 0),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(),
                                     ReturnF16 ? MVT::f16 : VA.getLocVT()));
  }
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (ARM::GPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i32));
      else if (ARM::DPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
}

/// isUsedByReturnOnly - Return true if the single-result node N is only
/// consumed (possibly through CopyToReg / VMOVRRD / BITCAST copies) by
/// RET_FLAG or INTRET_FLAG nodes. On success \p Chain is updated to the chain
/// entering the copy sequence.
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of this chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

/// mayBeEmittedAsTailCall - Return true if the target supports tail calls for
/// this call site: the subtarget must support them, the call must be marked
/// "tail", and the caller must not carry "disable-tail-calls"="true".
bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!Subtarget->supportsTailCall())
    return false;

  auto Attr =
      CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
    return false;

  return true;
}

// Trying to write a 64 bit value so need to split into two 32 bit values first,
// and pass the lower and high parts through.
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue WriteValue = Op->getOperand(2);

  // This function is only supposed to be called for i64 type argument.
  assert(WriteValue.getValueType() == MVT::i64
          && "LowerWRITE_REGISTER called for non-i64 type argument.");

  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(1, DL, MVT::i32));
  SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
  return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;

  // When generating execute-only code Constant Pools must be promoted to the
  // global data section. It's a bit ugly that we can't share them across basic
  // blocks, but this way we guarantee that execute-only behaves correct with
  // position-independent addressing modes.
  if (Subtarget->genExecuteOnly()) {
    auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());
    auto M = const_cast<Module*>(DAG.getMachineFunction().
                                 getFunction().getParent());
    auto GV = new GlobalVariable(
                    *M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C,
                    Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
                    Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
                    Twine(AFI->createPICLabelUId())
                  );
    SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
                                            dl, PtrVT);
    return LowerGlobalAddress(GA, DAG);
  }

  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

// ARM emits jump tables inline in the text section.
unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

/// LowerBlockAddress - Lower a BlockAddress to a constant-pool load, adding a
/// PIC_ADD of the PC label when position-independent (or ROPI) addressing is
/// in effect.
SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue CPAddr;
  bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
  if (!IsPositionIndependent) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    // PC-relative offset skew: the PC reads 4 (Thumb) or 8 (ARM) bytes ahead.
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
                                      ARMCP::CPBlockAddress, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(
      PtrVT, DL, DAG.getEntryNode(), CPAddr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  if (!IsPositionIndependent)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}

/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address for Darwin, and return an
/// SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");
  SDLoc DL(Op);

  // First step is to get the address of the actual global symbol. This is
  // where the TLS descriptor lives.
  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      MVT::i32, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()),
      /* Alignment = */ 4,
      MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant);
  Chain = FuncTLVGet.getValue(1);

  MachineFunction &F = DAG.getMachineFunction();
  MachineFrameInfo &MFI = F.getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
  // silly).
  auto TRI =
      getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
  auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
  const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());

  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
  Chain =
      DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
}

/// LowerGlobalTLSAddressWindows - Lower a TLS access on Windows on ARM:
/// read the TEB via the CP15 thread-ID register (MRC p15, c13), index the
/// per-module TLS array with _tls_index, then add the variable's SECREL
/// offset within the .tls section.
SDValue
ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");

  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);

  // Load the current TEB (thread environment block)
  SDValue Ops[] = {Chain,
                   DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                   DAG.getConstant(15, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getConstant(13, DL, MVT::i32),
                   DAG.getConstant(0, DL, MVT::i32),
                   DAG.getConstant(2, DL, MVT::i32)};
  SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                   DAG.getVTList(MVT::i32, MVT::Other), Ops);

  SDValue TEB = CurrentTEB.getValue(0);
  Chain = CurrentTEB.getValue(1);

  // Load the ThreadLocalStoragePointer from the TEB
  // A pointer to the TLS array is located at offset 0x2c from the TEB.
  SDValue TLSArray =
      DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
  TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());

  // The pointer to the thread's TLS data area is at the TLS Index scaled by 4
  // offset into the TLSArray.

  // Load the TLS index from the C runtime
  SDValue TLSIndex =
      DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
  TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
  TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());

  SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
                             DAG.getConstant(2, DL, MVT::i32));
  SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
                            DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
                            MachinePointerInfo());

  // Get the offset of the start of the .tls section (section base)
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
  SDValue Offset = DAG.getLoad(
      PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
                                    DAG.getTargetConstantPool(CPV, PtrVT, 4)),
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

  return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(GA);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // PC-relative offset skew: the PC reads 4 (Thumb) or 8 (ARM) bytes ahead.
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV =
    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(
      PtrVT, dl, DAG.getEntryNode(), Argument,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);

  // FIXME: is there useful debug info available here?
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
      DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    // Second load: dereference the GOT slot to get the variable's TP offset.
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  } else {
    // local exec model
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

/// LowerGlobalTLSAddress - Dispatch TLS lowering to the emulated-TLS,
/// Darwin, Windows, or ELF (general-dynamic / exec-model) path.
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerGlobalTLSAddressDarwin(Op, DAG);

  if (Subtarget->isTargetWindows())
    return LowerGlobalTLSAddressWindows(Op, DAG);

  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() && "Only ELF implemented here");
  TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());

  switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      // LocalDynamic falls back to the GeneralDynamic lowering (see TODO).
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModels(GA, DAG, model);
  }
  llvm_unreachable("bogus TLS model");
}

/// Return true if all users of V are within function F, looking through
/// ConstantExprs.
static bool allUsersAreInFunction(const Value *V, const Function *F) {
  SmallVector<const User*,4> Worklist;
  for (auto *U : V->users())
    Worklist.push_back(U);

  while (!Worklist.empty()) {
    auto *U = Worklist.pop_back_val();
    if (isa<ConstantExpr>(U)) {
      // A ConstantExpr has no parent function itself; look through it to the
      // instructions that ultimately use the value.
      for (auto *UU : U->users())
        Worklist.push_back(UU);
      continue;
    }

    // Any non-instruction user (or an instruction in another function) means
    // the value escapes F.
    auto *I = dyn_cast<Instruction>(U);
    if (!I || I->getParent()->getParent() != F)
      return false;
  }
  return true;
}

/// Try to emit the initializer of \p GV inline into the constant pool rather
/// than referencing the global through its own address. Returns the wrapped
/// constant-pool address on success, or an empty SDValue when the global is
/// not eligible for promotion.
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
                                     const GlobalValue *GV, SelectionDAG &DAG,
                                     EVT PtrVT, const SDLoc &dl) {
  // If we're creating a pool entry for a constant global with unnamed address,
  // and the global is small enough, we can emit it inline into the constant
  // pool to save ourselves an indirection.
  //
  // This is a win if the constant is only used in one function (so it doesn't
  // need to be duplicated) or duplicating the constant wouldn't increase code
  // size (implying the constant is no larger than 4 bytes).
  const Function &F = DAG.getMachineFunction().getFunction();

  // We rely on this decision to inline being idempotent and unrelated to the
  // use-site. We know that if we inline a variable at one use site, we'll
  // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
  // doesn't know about this optimization, so bail out if it's enabled else
  // we could decide to inline here (and thus never emit the GV) but require
  // the GV from fast-isel generated code.
  if (!EnableConstpoolPromotion ||
      DAG.getMachineFunction().getTarget().Options.EnableFastISel)
    return SDValue();

  // Only local, constant, unnamed_addr globals with an initializer qualify.
  auto *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer() ||
      !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
      !GVar->hasLocalLinkage())
    return SDValue();

  // If we inline a value that contains relocations, we move the relocations
  // from .data to .text. This is not allowed in position-independent code.
  auto *Init = GVar->getInitializer();
  if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
      Init->needsRelocation())
    return SDValue();

  // The constant islands pass can only really deal with alignment requests
  // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
  // any type wanting greater alignment requirements than 4 bytes. We also
  // can only promote constants that are multiples of 4 bytes in size or
  // are paddable to a multiple of 4. Currently we only try and pad constants
  // that are strings for simplicity.
  auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
  unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
  unsigned Align = DAG.getDataLayout().getPreferredAlignment(GVar);
  // RequiredPadding == 4 means Size is already a multiple of 4 (no padding).
  unsigned RequiredPadding = 4 - (Size % 4);
  bool PaddingPossible =
    RequiredPadding == 4 || (CDAInit && CDAInit->isString());
  if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize ||
      Size == 0)
    return SDValue();

  unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // We can't bloat the constant pool too much, else the ConstantIslands pass
  // may fail to converge. If we haven't promoted this global yet (it may have
  // multiple uses), and promoting it would increase the constant pool size (Sz
  // > 4), ensure we have space to do so up to MaxTotal.
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
    if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
        ConstpoolPromotionMaxTotal)
      return SDValue();

  // This is only valid if all users are in a single function; we can't clone
  // the constant in general. The LLVM IR unnamed_addr allows merging
  // constants, but not cloning them.
  //
  // We could potentially allow cloning if we could prove all uses of the
  // constant in the current function don't care about the address, like
  // printf format strings. But that isn't implemented for now.
  if (!allUsersAreInFunction(GVar, &F))
    return SDValue();

  // We're going to inline this global. Pad it out if needed.
  if (RequiredPadding != 4) {
    StringRef S = CDAInit->getAsString();

    SmallVector<uint8_t,16> V(S.size());
    std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
    while (RequiredPadding--)
      V.push_back(0);
    Init = ConstantDataArray::get(*DAG.getContext(), V);
  }

  auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
  SDValue CPAddr =
    DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
    // First time we promote this global: record it and account for the
    // constant pool growth (only the amount beyond one 4-byte word).
    AFI->markGlobalAsPromotedToConstantPool(GVar);
    AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
                                      PaddedSize - 4);
  }
  ++NumConstpoolPromoted;
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
}

/// Return true if \p GV (looking through aliases) is known to be read-only:
/// a constant GlobalVariable or a Function. An alias without a base object
/// is conservatively treated as writable.
bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    if (!(GV = GA->getBaseObject()))
      return false;
  if (const auto *V = dyn_cast<GlobalVariable>(GV))
    return V->isConstant();
  return isa<Function>(GV);
}

/// Dispatch global-address lowering based on the target's object file format.
SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Subtarget->getTargetTriple().getObjectFormat()) {
  default: llvm_unreachable("unknown object format");
  case Triple::COFF:
    return LowerGlobalAddressWindows(Op, DAG);
  case Triple::ELF:
    return LowerGlobalAddressELF(Op, DAG);
  case Triple::MachO:
    return LowerGlobalAddressDarwin(Op, DAG);
  }
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const TargetMachine &TM = getTargetMachine();
  bool IsRO = isReadOnly(GV);

  // promoteToConstantPool only if not generating XO text section
  if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
    if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
      return V;

  if (isPositionIndependent()) {
    // Use a GOT-relative access when the symbol may be preempted; otherwise a
    // plain PC-relative reference suffices.
    bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                           UseGOT_PREL ? ARMII::MO_GOT : 0);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    if (UseGOT_PREL)
      Result =
          DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    return Result;
  } else if (Subtarget->isROPI() && IsRO) {
    // PC-relative.
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    return Result;
  } else if (Subtarget->isRWPI() && !IsRO) {
    // SB-relative: compute an offset relative to the static base (R9).
    SDValue RelAddr;
    if (Subtarget->useMovt(DAG.getMachineFunction())) {
      ++NumMovwMovt;
      SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
      RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
    } else { // use literal pool for address constant
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      RelAddr = DAG.getLoad(
          PtrVT, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
    SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
    SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via movt/movw
  // pair. This is always cheaper.
  if (Subtarget->useMovt(DAG.getMachineFunction())) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    // No movw/movt: load the address from the constant pool.
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }
}

SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Darwin");
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  if (Subtarget->useMovt(DAG.getMachineFunction()))
    ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into multiple nodes
  unsigned Wrapper =
      isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;

  SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
  SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);

  // Symbols accessed indirectly need an extra load to fetch the real address.
  if (Subtarget->isGVIndirectSymbol(GV))
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
                                                     SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
  assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
         "Windows on ARM expects to use movw/movt");
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Windows");

  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
  if (GV->hasDLLImportStorageClass())
    TargetFlags = ARMII::MO_DLLIMPORT;
  else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    TargetFlags = ARMII::MO_COFFSTUB;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  SDLoc DL(Op);

  ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes.
  Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
                       DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
                                                  TargetFlags));
  // dllimport/stub references hold the address of the actual symbol; add a
  // load through that pointer.
  if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

/// Lower llvm.eh.sjlj.setjmp to the ARM-specific EH_SJLJ_SETJMP node.
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Val = DAG.getConstant(0, dl, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

/// Lower llvm.eh.sjlj.longjmp to the ARM-specific EH_SJLJ_LONGJMP node.
SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
}

/// Lower llvm.eh.sjlj.setup.dispatch to the ARM-specific node.
SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
                     Op.getOperand(0));
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    // Materialize the address of the current function's LSDA via a
    // constant-pool entry (PC-relative when position independent).
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue CPAddr;
    bool IsPositionIndependent = isPositionIndependent();
    // PC adjustment: pipeline offset of the PC read (4 in Thumb, 8 in ARM).
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    if (IsPositionIndependent) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vabs:
    return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
                      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminnm:
  case Intrinsic::arm_neon_vmaxnm: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
                      ? ISD::FMINNUM : ISD::FMAXNUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminu:
  case Intrinsic::arm_neon_vmaxu: {
    if (Op.getValueType().isFloatingPoint())
      return SDValue();
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
                      ? ISD::UMIN : ISD::UMAX;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vmins:
  case Intrinsic::arm_neon_vmaxs: {
    // v{min,max}s is overloaded between signed integers and floats.
    if (!Op.getValueType().isFloatingPoint()) {
      unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
                        ? ISD::SMIN : ISD::SMAX;
      return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
    }
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
                      ? ISD::FMINIMUM : ISD::FMAXIMUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vtbl1:
    return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::arm_neon_vtbl2:
    return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  }
}

static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  SDLoc dl(Op);
  // Operand 2 is the sync scope; a single-thread fence needs no barrier.
  ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
  auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
  if (SSID == SyncScope::SingleThread)
    return Op;

  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, dl, MVT::i32));
  }

  // Operand 1 is the atomic ordering; pick the cheapest sufficient DMB domain.
  ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
  AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
  ARM_MB::MemBOpt Domain = ARM_MB::ISH;
  if (Subtarget->isMClass()) {
    // Only a full system barrier exists in the M-class architectures.
    Domain = ARM_MB::SY;
  } else if (Subtarget->preferISHSTBarriers() &&
             Ord == AtomicOrdering::Release) {
    // Swift happens to implement ISHST barriers in a way that's compatible with
    // Release semantics but weaker than ISH so we'd be fools not to use
    // it. Beware: other processors probably don't!
    Domain = ARM_MB::ISHST;
  }

  return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
                     DAG.getConstant(Domain, dl, MVT::i32));
}

static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  // ARM pre v5TE and Thumb1 does not have preload instructions.
  if (!(Subtarget->isThumb2() ||
        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc dl(Op);
  // Operand 2 is 1 for a write prefetch; invert it so isRead is 1 for reads.
  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // ARMv7 with MP extension has PLDW.
    return Op.getOperand(0);

  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (Subtarget->isThumb()) {
    // Invert the bits.
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }

  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
                     DAG.getConstant(isData, dl, MVT::i32));
}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

/// Reassemble an f64 formal argument that was split across two locations
/// (register+register or register+stack) into a single VMOVDRR node.
SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
                                                CCValAssign &NextVA,
                                                SDValue &Root,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    ArgValue2 = DAG.getLoad(
        MVT::i32, dl, Root, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }
  // Big-endian targets place the halves in the opposite order.
  if (!Subtarget->isLittle())
    std::swap (ArgValue, ArgValue2);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval). Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                      const SDLoc &dl, SDValue &Chain,
                                      const Value *OrigArg,
                                      unsigned InRegsParamRecordIdx,
                                      int ArgOffset, unsigned ArgSize) const {
  // Currently, two use-cases possible:
  // Case #1. Non-var-args function, and we meet first byval parameter.
  //          Setup first unallocated register as first byval register;
  //          eat all remained registers
  //          (these two actions are performed by HandleByVal method).
  //          Then, here, we initialize stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function, that doesn't contain byval parameters.
  //          The same: eat all remained unallocated registers,
  //          initialize stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
  } else {
    // No byval record: spill every still-unallocated GPR argument register.
    unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
    REnd = ARM::R4;
  }

  if (REnd != RBegin)
    ArgOffset = -4 * (ARM::R4 - RBegin);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
  SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);

  SmallVector<SDValue, 4> MemOps;
  const TargetRegisterClass *RC =
      AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

  // Store each live-in register to consecutive 4-byte slots.
  for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
    unsigned VReg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
    SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(OrigArg, 4 * i));
    MemOps.push_back(Store);
    FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  return FrameIndex;
}

// Setup stack frame, the va_list pointer will start from.
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                             const SDLoc &dl, SDValue &Chain,
                                             unsigned ArgOffset,
                                             unsigned TotalArgRegsSaveSize,
                                             bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Try to store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing
  // the result of va_next.
  // If there is no regs to be stored, just point address after last
  // argument passed via stack.
  int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
                                  CCInfo.getInRegsParamsCount(),
                                  CCInfo.getNextStackOffset(), 4);
  AFI->setVarArgsFrameIndex(FrameIndex);
}

SDValue ARMTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet byval parameter.
  // We also increase this value in case of varargs function.
  AFI->setArgRegsSaveSize(0);

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  unsigned ArgRegBegin = ARM::R4;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
      break;

    CCValAssign &VA = ArgLocs[i];
    unsigned Index = VA.getValNo();
    ISD::ArgFlagsTy Flags = Ins[Index].Flags;
    if (!Flags.isByVal())
      continue;

    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
    ArgRegBegin = std::min(ArgRegBegin, RBegin);

    CCInfo.nextInRegsParam();
  }
  CCInfo.rewindByValRegsInfo();

  int lastInsIndex = -1;
  if (isVarArg && MFI.hasVAStart()) {
    unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    if (RegIdx != array_lengthof(GPRArgRegs))
      ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
  }

  unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
  AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[VA.getValNo()].isOrigArg()) {
      std::advance(CurOrigArg,
                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
    }
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        if (VA.getLocVT() == MVT::v2f64) {
          // A v2f64 consumes two f64 locations; each half may itself be a
          // register pair or a register+stack split.
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2;
          if (VA.isMemLoc()) {
            int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
            SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
                                    MachinePointerInfo::getFixedStack(
                                        DAG.getMachineFunction(), FI));
          } else {
            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                             Chain, DAG, dl);
          }
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1,
                                 DAG.getIntPtrConstant(0, dl));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2,
                                 DAG.getIntPtrConstant(1, dl));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
      } else {
        // Pick the register class matching the location type.
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f16)
          RC = &ARM::HPRRegClass;
        else if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64 || RegVT == MVT::v4f16)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else { // VA.isRegLoc()
      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = VA.getValNo();

      // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.
      if (index != lastInsIndex)
        {
          ISD::ArgFlagsTy Flags = Ins[index].Flags;
          // FIXME: For now, all byval parameter objects are marked mutable.
          // This can be changed with more analysis.
          // In case of tail call optimization mark all arguments mutable.
          // Since they could be overwritten by lowering of arguments in case of
          // a tail call.
          if (Flags.isByVal()) {
            assert(Ins[index].isOrigArg() &&
                   "Byval arguments cannot be implicit");
            unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

            int FrameIndex = StoreByValRegs(
                CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
                VA.getLocMemOffset(), Flags.getByValSize());
            InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
            CCInfo.nextInRegsParam();
          } else {
            unsigned FIOffset = VA.getLocMemOffset();
            int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                           FIOffset, true);

            // Create load nodes to retrieve arguments from the stack.
            SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
            InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                         MachinePointerInfo::getFixedStack(
                                             DAG.getMachineFunction(), FI)));
          }
          lastInsIndex = index;
        }
    }
  }

  // varargs
  if (isVarArg && MFI.hasVAStart())
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
                         CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  } else if (Op->getOpcode() == ISD::BITCAST &&
             Op->getValueType(0) == MVT::f64) {
    // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
    // created by LowerConstantFP().
    SDValue BitcastOp = Op->getOperand(0);
    if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
        isNullConstant(BitcastOp->getOperand(0)))
      return true;
  }
  return false;
}

/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &ARMcc, SelectionDAG &DAG,
                                     const SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate((int32_t)C)) {
      // Constant does not fit, try adjusting it by one.
      // e.g. (x < C) is equivalent to (x <= C-1), which may use a legal
      // immediate; the boundary checks avoid wrapping the constant.
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C != 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C - 1, dl, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C + 1, dl, MVT::i32);
        }
        break;
      }
    }
  } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
             (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
    // In ARM and Thumb-2, the compare instructions can shift their second
    // operand.
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}

/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, const SDLoc &dl,
                                     bool InvalidOnQNaN) const {
  assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
  SDValue Cmp;
  SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32);
  // Comparing against +0.0 uses the cheaper compare-with-zero form.
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}

/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
3923 SDValue 3924 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 3925 unsigned Opc = Cmp.getOpcode(); 3926 SDLoc DL(Cmp); 3927 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 3928 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3929 3930 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 3931 Cmp = Cmp.getOperand(0); 3932 Opc = Cmp.getOpcode(); 3933 if (Opc == ARMISD::CMPFP) 3934 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), 3935 Cmp.getOperand(1), Cmp.getOperand(2)); 3936 else { 3937 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 3938 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), 3939 Cmp.getOperand(1)); 3940 } 3941 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 3942 } 3943 3944 // This function returns three things: the arithmetic computation itself 3945 // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The 3946 // comparison and the condition code define the case in which the arithmetic 3947 // computation *does not* overflow. 3948 std::pair<SDValue, SDValue> 3949 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, 3950 SDValue &ARMcc) const { 3951 assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); 3952 3953 SDValue Value, OverflowCmp; 3954 SDValue LHS = Op.getOperand(0); 3955 SDValue RHS = Op.getOperand(1); 3956 SDLoc dl(Op); 3957 3958 // FIXME: We are currently always generating CMPs because we don't support 3959 // generating CMN through the backend. This is not as good as the natural 3960 // CMP case because it causes a register dependency and cannot be folded 3961 // later. 
3962 3963 switch (Op.getOpcode()) { 3964 default: 3965 llvm_unreachable("Unknown overflow instruction!"); 3966 case ISD::SADDO: 3967 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3968 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3969 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3970 break; 3971 case ISD::UADDO: 3972 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3973 // We use ADDC here to correspond to its use in LowerUnsignedALUO. 3974 // We do not use it in the USUBO case as Value may not be used. 3975 Value = DAG.getNode(ARMISD::ADDC, dl, 3976 DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS) 3977 .getValue(0); 3978 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3979 break; 3980 case ISD::SSUBO: 3981 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3982 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3983 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3984 break; 3985 case ISD::USUBO: 3986 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3987 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3988 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3989 break; 3990 case ISD::UMULO: 3991 // We generate a UMUL_LOHI and then check if the high word is 0. 3992 ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); 3993 Value = DAG.getNode(ISD::UMUL_LOHI, dl, 3994 DAG.getVTList(Op.getValueType(), Op.getValueType()), 3995 LHS, RHS); 3996 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), 3997 DAG.getConstant(0, dl, MVT::i32)); 3998 Value = Value.getValue(0); // We only want the low 32 bits for the result. 3999 break; 4000 case ISD::SMULO: 4001 // We generate a SMUL_LOHI and then check if all the bits of the high word 4002 // are the same as the sign bit of the low word. 
4003 ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32); 4004 Value = DAG.getNode(ISD::SMUL_LOHI, dl, 4005 DAG.getVTList(Op.getValueType(), Op.getValueType()), 4006 LHS, RHS); 4007 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1), 4008 DAG.getNode(ISD::SRA, dl, Op.getValueType(), 4009 Value.getValue(0), 4010 DAG.getConstant(31, dl, MVT::i32))); 4011 Value = Value.getValue(0); // We only want the low 32 bits for the result. 4012 break; 4013 } // switch (...) 4014 4015 return std::make_pair(Value, OverflowCmp); 4016 } 4017 4018 SDValue 4019 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { 4020 // Let legalize expand this if it isn't a legal type yet. 4021 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 4022 return SDValue(); 4023 4024 SDValue Value, OverflowCmp; 4025 SDValue ARMcc; 4026 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); 4027 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4028 SDLoc dl(Op); 4029 // We use 0 and 1 as false and true values. 
4030 SDValue TVal = DAG.getConstant(1, dl, MVT::i32); 4031 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); 4032 EVT VT = Op.getValueType(); 4033 4034 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, 4035 ARMcc, CCR, OverflowCmp); 4036 4037 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 4038 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 4039 } 4040 4041 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, 4042 SelectionDAG &DAG) { 4043 SDLoc DL(BoolCarry); 4044 EVT CarryVT = BoolCarry.getValueType(); 4045 4046 // This converts the boolean value carry into the carry flag by doing 4047 // ARMISD::SUBC Carry, 1 4048 SDValue Carry = DAG.getNode(ARMISD::SUBC, DL, 4049 DAG.getVTList(CarryVT, MVT::i32), 4050 BoolCarry, DAG.getConstant(1, DL, CarryVT)); 4051 return Carry.getValue(1); 4052 } 4053 4054 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, 4055 SelectionDAG &DAG) { 4056 SDLoc DL(Flags); 4057 4058 // Now convert the carry flag into a boolean carry. We do this 4059 // using ARMISD:ADDE 0, 0, Carry 4060 return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32), 4061 DAG.getConstant(0, DL, MVT::i32), 4062 DAG.getConstant(0, DL, MVT::i32), Flags); 4063 } 4064 4065 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op, 4066 SelectionDAG &DAG) const { 4067 // Let legalize expand this if it isn't a legal type yet. 4068 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 4069 return SDValue(); 4070 4071 SDValue LHS = Op.getOperand(0); 4072 SDValue RHS = Op.getOperand(1); 4073 SDLoc dl(Op); 4074 4075 EVT VT = Op.getValueType(); 4076 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 4077 SDValue Value; 4078 SDValue Overflow; 4079 switch (Op.getOpcode()) { 4080 default: 4081 llvm_unreachable("Unknown overflow instruction!"); 4082 case ISD::UADDO: 4083 Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS); 4084 // Convert the carry flag into a boolean value. 
4085 Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); 4086 break; 4087 case ISD::USUBO: { 4088 Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS); 4089 // Convert the carry flag into a boolean value. 4090 Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG); 4091 // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow 4092 // value. So compute 1 - C. 4093 Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32, 4094 DAG.getConstant(1, dl, MVT::i32), Overflow); 4095 break; 4096 } 4097 } 4098 4099 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 4100 } 4101 4102 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 4103 SDValue Cond = Op.getOperand(0); 4104 SDValue SelectTrue = Op.getOperand(1); 4105 SDValue SelectFalse = Op.getOperand(2); 4106 SDLoc dl(Op); 4107 unsigned Opc = Cond.getOpcode(); 4108 4109 if (Cond.getResNo() == 1 && 4110 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 4111 Opc == ISD::USUBO)) { 4112 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) 4113 return SDValue(); 4114 4115 SDValue Value, OverflowCmp; 4116 SDValue ARMcc; 4117 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); 4118 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4119 EVT VT = Op.getValueType(); 4120 4121 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, 4122 OverflowCmp, DAG); 4123 } 4124 4125 // Convert: 4126 // 4127 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 4128 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 4129 // 4130 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 4131 const ConstantSDNode *CMOVTrue = 4132 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 4133 const ConstantSDNode *CMOVFalse = 4134 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 4135 4136 if (CMOVTrue && CMOVFalse) { 4137 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 4138 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 
4139 4140 SDValue True; 4141 SDValue False; 4142 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 4143 True = SelectTrue; 4144 False = SelectFalse; 4145 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 4146 True = SelectFalse; 4147 False = SelectTrue; 4148 } 4149 4150 if (True.getNode() && False.getNode()) { 4151 EVT VT = Op.getValueType(); 4152 SDValue ARMcc = Cond.getOperand(2); 4153 SDValue CCR = Cond.getOperand(3); 4154 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 4155 assert(True.getValueType() == VT); 4156 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); 4157 } 4158 } 4159 } 4160 4161 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 4162 // undefined bits before doing a full-word comparison with zero. 4163 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 4164 DAG.getConstant(1, dl, Cond.getValueType())); 4165 4166 return DAG.getSelectCC(dl, Cond, 4167 DAG.getConstant(0, dl, Cond.getValueType()), 4168 SelectTrue, SelectFalse, ISD::SETNE); 4169 } 4170 4171 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 4172 bool &swpCmpOps, bool &swpVselOps) { 4173 // Start by selecting the GE condition code for opcodes that return true for 4174 // 'equality' 4175 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || 4176 CC == ISD::SETULE) 4177 CondCode = ARMCC::GE; 4178 4179 // and GT for opcodes that return false for 'equality'. 4180 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || 4181 CC == ISD::SETULT) 4182 CondCode = ARMCC::GT; 4183 4184 // Since we are constrained to GE/GT, if the opcode contains 'less', we need 4185 // to swap the compare operands. 4186 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || 4187 CC == ISD::SETULT) 4188 swpCmpOps = true; 4189 4190 // Both GT and GE are ordered comparisons, and return false for 'unordered'. 
4191 // If we have an unordered opcode, we need to swap the operands to the VSEL 4192 // instruction (effectively negating the condition). 4193 // 4194 // This also has the effect of swapping which one of 'less' or 'greater' 4195 // returns true, so we also swap the compare operands. It also switches 4196 // whether we return true for 'equality', so we compensate by picking the 4197 // opposite condition code to our original choice. 4198 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || 4199 CC == ISD::SETUGT) { 4200 swpCmpOps = !swpCmpOps; 4201 swpVselOps = !swpVselOps; 4202 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; 4203 } 4204 4205 // 'ordered' is 'anything but unordered', so use the VS condition code and 4206 // swap the VSEL operands. 4207 if (CC == ISD::SETO) { 4208 CondCode = ARMCC::VS; 4209 swpVselOps = true; 4210 } 4211 4212 // 'unordered or not equal' is 'anything but equal', so use the EQ condition 4213 // code and swap the VSEL operands. 4214 if (CC == ISD::SETUNE) { 4215 CondCode = ARMCC::EQ; 4216 swpVselOps = true; 4217 } 4218 } 4219 4220 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, 4221 SDValue TrueVal, SDValue ARMcc, SDValue CCR, 4222 SDValue Cmp, SelectionDAG &DAG) const { 4223 if (Subtarget->isFPOnlySP() && VT == MVT::f64) { 4224 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, 4225 DAG.getVTList(MVT::i32, MVT::i32), FalseVal); 4226 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, 4227 DAG.getVTList(MVT::i32, MVT::i32), TrueVal); 4228 4229 SDValue TrueLow = TrueVal.getValue(0); 4230 SDValue TrueHigh = TrueVal.getValue(1); 4231 SDValue FalseLow = FalseVal.getValue(0); 4232 SDValue FalseHigh = FalseVal.getValue(1); 4233 4234 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, 4235 ARMcc, CCR, Cmp); 4236 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, 4237 ARMcc, CCR, duplicateCmp(Cmp, DAG)); 4238 4239 return DAG.getNode(ARMISD::VMOVDRR, dl, 
MVT::f64, Low, High); 4240 } else { 4241 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, 4242 Cmp); 4243 } 4244 } 4245 4246 static bool isGTorGE(ISD::CondCode CC) { 4247 return CC == ISD::SETGT || CC == ISD::SETGE; 4248 } 4249 4250 static bool isLTorLE(ISD::CondCode CC) { 4251 return CC == ISD::SETLT || CC == ISD::SETLE; 4252 } 4253 4254 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. 4255 // All of these conditions (and their <= and >= counterparts) will do: 4256 // x < k ? k : x 4257 // x > k ? x : k 4258 // k < x ? x : k 4259 // k > x ? k : x 4260 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, 4261 const SDValue TrueVal, const SDValue FalseVal, 4262 const ISD::CondCode CC, const SDValue K) { 4263 return (isGTorGE(CC) && 4264 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || 4265 (isLTorLE(CC) && 4266 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); 4267 } 4268 4269 // Similar to isLowerSaturate(), but checks for upper-saturating conditions. 4270 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, 4271 const SDValue TrueVal, const SDValue FalseVal, 4272 const ISD::CondCode CC, const SDValue K) { 4273 return (isGTorGE(CC) && 4274 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) || 4275 (isLTorLE(CC) && 4276 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))); 4277 } 4278 4279 // Check if two chained conditionals could be converted into SSAT or USAT. 4280 // 4281 // SSAT can replace a set of two conditional selectors that bound a number to an 4282 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: 4283 // 4284 // x < -k ? -k : (x > k ? k : x) 4285 // x < -k ? -k : (x < k ? x : k) 4286 // x > -k ? (x > k ? k : x) : -k 4287 // x < k ? (x < -k ? -k : x) : k 4288 // etc. 4289 // 4290 // USAT works similarily to SSAT but bounds on the interval [0, k] where k + 1 is 4291 // a power of 2. 
4292 // 4293 // It returns true if the conversion can be done, false otherwise. 4294 // Additionally, the variable is returned in parameter V, the constant in K and 4295 // usat is set to true if the conditional represents an unsigned saturation 4296 static bool isSaturatingConditional(const SDValue &Op, SDValue &V, 4297 uint64_t &K, bool &usat) { 4298 SDValue LHS1 = Op.getOperand(0); 4299 SDValue RHS1 = Op.getOperand(1); 4300 SDValue TrueVal1 = Op.getOperand(2); 4301 SDValue FalseVal1 = Op.getOperand(3); 4302 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4303 4304 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1; 4305 if (Op2.getOpcode() != ISD::SELECT_CC) 4306 return false; 4307 4308 SDValue LHS2 = Op2.getOperand(0); 4309 SDValue RHS2 = Op2.getOperand(1); 4310 SDValue TrueVal2 = Op2.getOperand(2); 4311 SDValue FalseVal2 = Op2.getOperand(3); 4312 ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get(); 4313 4314 // Find out which are the constants and which are the variables 4315 // in each conditional 4316 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1) 4317 ? &RHS1 4318 : nullptr; 4319 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2) 4320 ? &RHS2 4321 : nullptr; 4322 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2; 4323 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1; 4324 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2; 4325 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2; 4326 4327 // We must detect cases where the original operations worked with 16- or 4328 // 8-bit values. In such case, V2Tmp != V2 because the comparison operations 4329 // must work with sign-extended values but the select operations return 4330 // the original non-extended value. 
4331 SDValue V2TmpReg = V2Tmp; 4332 if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG) 4333 V2TmpReg = V2Tmp->getOperand(0); 4334 4335 // Check that the registers and the constants have the correct values 4336 // in both conditionals 4337 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp || 4338 V2TmpReg != V2) 4339 return false; 4340 4341 // Figure out which conditional is saturating the lower/upper bound. 4342 const SDValue *LowerCheckOp = 4343 isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) 4344 ? &Op 4345 : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) 4346 ? &Op2 4347 : nullptr; 4348 const SDValue *UpperCheckOp = 4349 isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) 4350 ? &Op 4351 : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) 4352 ? &Op2 4353 : nullptr; 4354 4355 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp) 4356 return false; 4357 4358 // Check that the constant in the lower-bound check is 4359 // the opposite of the constant in the upper-bound check 4360 // in 1's complement. 4361 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue(); 4362 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue(); 4363 int64_t PosVal = std::max(Val1, Val2); 4364 int64_t NegVal = std::min(Val1, Val2); 4365 4366 if (((Val1 > Val2 && UpperCheckOp == &Op) || 4367 (Val1 < Val2 && UpperCheckOp == &Op2)) && 4368 isPowerOf2_64(PosVal + 1)) { 4369 4370 // Handle the difference between USAT (unsigned) and SSAT (signed) saturation 4371 if (Val1 == ~Val2) 4372 usat = false; 4373 else if (NegVal == 0) 4374 usat = true; 4375 else 4376 return false; 4377 4378 V = V2; 4379 K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive 4380 4381 return true; 4382 } 4383 4384 return false; 4385 } 4386 4387 // Check if a condition of the type x < k ? k : x can be converted into a 4388 // bit operation instead of conditional moves. 
4389 // Currently this is allowed given: 4390 // - The conditions and values match up 4391 // - k is 0 or -1 (all ones) 4392 // This function will not check the last condition, thats up to the caller 4393 // It returns true if the transformation can be made, and in such case 4394 // returns x in V, and k in SatK. 4395 static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, 4396 SDValue &SatK) 4397 { 4398 SDValue LHS = Op.getOperand(0); 4399 SDValue RHS = Op.getOperand(1); 4400 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4401 SDValue TrueVal = Op.getOperand(2); 4402 SDValue FalseVal = Op.getOperand(3); 4403 4404 SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS) 4405 ? &RHS 4406 : nullptr; 4407 4408 // No constant operation in comparison, early out 4409 if (!K) 4410 return false; 4411 4412 SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal; 4413 V = (KTmp == TrueVal) ? FalseVal : TrueVal; 4414 SDValue VTmp = (K && *K == LHS) ? 
RHS : LHS; 4415 4416 // If the constant on left and right side, or variable on left and right, 4417 // does not match, early out 4418 if (*K != KTmp || V != VTmp) 4419 return false; 4420 4421 if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) { 4422 SatK = *K; 4423 return true; 4424 } 4425 4426 return false; 4427 } 4428 4429 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 4430 EVT VT = Op.getValueType(); 4431 SDLoc dl(Op); 4432 4433 // Try to convert two saturating conditional selects into a single SSAT 4434 SDValue SatValue; 4435 uint64_t SatConstant; 4436 bool SatUSat; 4437 if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) && 4438 isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) { 4439 if (SatUSat) 4440 return DAG.getNode(ARMISD::USAT, dl, VT, SatValue, 4441 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); 4442 else 4443 return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue, 4444 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); 4445 } 4446 4447 // Try to convert expressions of the form x < k ? k : x (and similar forms) 4448 // into more efficient bit operations, which is possible when k is 0 or -1 4449 // On ARM and Thumb-2 which have flexible operand 2 this will result in 4450 // single instructions. On Thumb the shift and the bit operation will be two 4451 // instructions. 
4452 // Only allow this transformation on full-width (32-bit) operations 4453 SDValue LowerSatConstant; 4454 if (VT == MVT::i32 && 4455 isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) { 4456 SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue, 4457 DAG.getConstant(31, dl, VT)); 4458 if (isNullConstant(LowerSatConstant)) { 4459 SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV, 4460 DAG.getAllOnesConstant(dl, VT)); 4461 return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV); 4462 } else if (isAllOnesConstant(LowerSatConstant)) 4463 return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV); 4464 } 4465 4466 SDValue LHS = Op.getOperand(0); 4467 SDValue RHS = Op.getOperand(1); 4468 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4469 SDValue TrueVal = Op.getOperand(2); 4470 SDValue FalseVal = Op.getOperand(3); 4471 4472 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { 4473 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, 4474 dl); 4475 4476 // If softenSetCCOperands only returned one value, we should compare it to 4477 // zero. 4478 if (!RHS.getNode()) { 4479 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 4480 CC = ISD::SETNE; 4481 } 4482 } 4483 4484 if (LHS.getValueType() == MVT::i32) { 4485 // Try to generate VSEL on ARMv8. 4486 // The VSEL instruction can't use all the usual ARM condition 4487 // codes: it only has two bits to select the condition code, so it's 4488 // constrained to use only GE, GT, VS and EQ. 
4489 // 4490 // To implement all the various ISD::SETXXX opcodes, we sometimes need to 4491 // swap the operands of the previous compare instruction (effectively 4492 // inverting the compare condition, swapping 'less' and 'greater') and 4493 // sometimes need to swap the operands to the VSEL (which inverts the 4494 // condition in the sense of firing whenever the previous condition didn't) 4495 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 4496 TrueVal.getValueType() == MVT::f64)) { 4497 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 4498 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || 4499 CondCode == ARMCC::VC || CondCode == ARMCC::NE) { 4500 CC = ISD::getSetCCInverse(CC, true); 4501 std::swap(TrueVal, FalseVal); 4502 } 4503 } 4504 4505 SDValue ARMcc; 4506 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4507 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 4508 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 4509 } 4510 4511 ARMCC::CondCodes CondCode, CondCode2; 4512 bool InvalidOnQNaN; 4513 FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); 4514 4515 // Normalize the fp compare. If RHS is zero we keep it there so we match 4516 // CMPFPw0 instead of CMPFP. 
4517 if (Subtarget->hasFPARMv8() && !isFloatingPointZero(RHS) && 4518 (TrueVal.getValueType() == MVT::f16 || 4519 TrueVal.getValueType() == MVT::f32 || 4520 TrueVal.getValueType() == MVT::f64)) { 4521 bool swpCmpOps = false; 4522 bool swpVselOps = false; 4523 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); 4524 4525 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || 4526 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { 4527 if (swpCmpOps) 4528 std::swap(LHS, RHS); 4529 if (swpVselOps) 4530 std::swap(TrueVal, FalseVal); 4531 } 4532 } 4533 4534 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 4535 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); 4536 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4537 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 4538 if (CondCode2 != ARMCC::AL) { 4539 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); 4540 // FIXME: Needs another CMP because flag can have but one use. 4541 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); 4542 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); 4543 } 4544 return Result; 4545 } 4546 4547 /// canChangeToInt - Given the fp compare operand, return true if it is suitable 4548 /// to morph to an integer compare sequence. 4549 static bool canChangeToInt(SDValue Op, bool &SeenZero, 4550 const ARMSubtarget *Subtarget) { 4551 SDNode *N = Op.getNode(); 4552 if (!N->hasOneUse()) 4553 // Otherwise it requires moving the value from fp to integer registers. 4554 return false; 4555 if (!N->getNumValues()) 4556 return false; 4557 EVT VT = Op.getValueType(); 4558 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 4559 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 4560 // vmrs are very slow, e.g. cortex-a8. 
4561 return false; 4562 4563 if (isFloatingPointZero(Op)) { 4564 SeenZero = true; 4565 return true; 4566 } 4567 return ISD::isNormalLoad(N); 4568 } 4569 4570 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 4571 if (isFloatingPointZero(Op)) 4572 return DAG.getConstant(0, SDLoc(Op), MVT::i32); 4573 4574 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 4575 return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), 4576 Ld->getPointerInfo(), Ld->getAlignment(), 4577 Ld->getMemOperand()->getFlags()); 4578 4579 llvm_unreachable("Unknown VFP cmp argument!"); 4580 } 4581 4582 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 4583 SDValue &RetVal1, SDValue &RetVal2) { 4584 SDLoc dl(Op); 4585 4586 if (isFloatingPointZero(Op)) { 4587 RetVal1 = DAG.getConstant(0, dl, MVT::i32); 4588 RetVal2 = DAG.getConstant(0, dl, MVT::i32); 4589 return; 4590 } 4591 4592 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 4593 SDValue Ptr = Ld->getBasePtr(); 4594 RetVal1 = 4595 DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), 4596 Ld->getAlignment(), Ld->getMemOperand()->getFlags()); 4597 4598 EVT PtrType = Ptr.getValueType(); 4599 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 4600 SDValue NewPtr = DAG.getNode(ISD::ADD, dl, 4601 PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); 4602 RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, 4603 Ld->getPointerInfo().getWithOffset(4), NewAlign, 4604 Ld->getMemOperand()->getFlags()); 4605 return; 4606 } 4607 4608 llvm_unreachable("Unknown VFP cmp argument!"); 4609 } 4610 4611 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 4612 /// f32 and even f64 comparisons to integer ones. 
/// OptimizeVFPBrcond - Fast-math alternative for an f32/f64 equality branch:
/// compare the operands' bit patterns as integers instead of issuing a VFP
/// compare.  Called from LowerBR_CC only when UnsafeFPMath is enabled and the
/// condition is an (un)ordered EQ/NE.  Requires that both operands can be
/// re-read as integers (canChangeToInt) and that at least one of them has
/// been seen to be a floating-point zero, so masking off the sign bit keeps
/// +0.0 == -0.0 comparing equal.  Returns an empty SDValue when the
/// transformation does not apply.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  bool LHSSeenZero = false;
  bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
  bool RHSSeenZero = false;
  bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
  if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
    // If unsafe fp math optimization is enabled and there are no other uses of
    // the CMP operands, and the condition code is EQ or NE, we can optimize it
    // to an integer comparison.
    if (CC == ISD::SETOEQ)
      CC = ISD::SETEQ;
    else if (CC == ISD::SETUNE)
      CC = ISD::SETNE;

    // 0x7fffffff clears the IEEE-754 sign bit of the (high) word so that
    // +0.0 and -0.0 have identical masked bit patterns.
    SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
    SDValue ARMcc;
    if (LHS.getValueType() == MVT::f32) {
      // f32: a single masked i32 compare feeding a conditional branch.
      LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(LHS, DAG), Mask);
      RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(RHS, DAG), Mask);
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                         Chain, Dest, ARMcc, CCR, Cmp);
    }

    // f64: split each operand into two i32 halves, mask the sign bit out of
    // the high halves, and emit the BCC_i64 pseudo to compare the pairs.
    SDValue LHS1, LHS2;
    SDValue RHS1, RHS2;
    expandf64Toi32(LHS, DAG, LHS1, LHS2);
    expandf64Toi32(RHS, DAG, RHS1, RHS2);
    LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
    RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
    ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
  }

  return SDValue();
}

/// LowerBRCOND - Lower an ISD::BRCOND node.  The only case handled here is a
/// branch on the overflow flag (result #1) of a {s|u}{add|sub|mul}.with.overflow
/// node; everything else is left to the generic legalizer by returning an
/// empty SDValue.
SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = Cond.getOpcode();
  // Thumb1 has no flag-setting multiply suitable for the MULO expansion.
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                     !Subtarget->isThumb1Only();
  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);

    // Reverse the condition code: BRCOND branches when the overflow bit is
    // set, which is the opposite sense of the condition getARMXALUOOp built.
    ARMCC::CondCodes CondCode =
        (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
    CondCode = ARMCC::getOppositeCondition(CondCode);
    ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  return SDValue();
}

/// LowerBR_CC - Lower an ISD::BR_CC (compare-and-branch).  Handles, in order:
/// softening f64 compares on FP-only-SP subtargets, branches on the overflow
/// flag of *.with.overflow nodes compared against 0/1, plain i32 compares,
/// the unsafe-FP integer shortcut (OptimizeVFPBrcond), and finally a VFP
/// compare — emitting a second conditional branch when the IR condition maps
/// to two ARM condition codes.
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
    // No double-precision hardware: turn the f64 compare into a libcall-based
    // integer compare.
    DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
                                                    dl);

    // If softenSetCCOperands only returned one value, we should compare it to
    // zero.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  unsigned Opc = LHS.getOpcode();
  bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
                     !Subtarget->isThumb1Only();
  if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO || OptimizeMul) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);

    // The branch polarity depends on both the setcc condition (EQ vs NE) and
    // whether the flag was compared against 0 or against 1.
    if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
      // Reverse the condition code.
      ARMCC::CondCodes CondCode =
          (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
      CondCode = ARMCC::getOppositeCondition(CondCode);
      ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
    }
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
                       OverflowCmp);
  }

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
      return Result;
  }

  ARMCC::CondCodes CondCode, CondCode2;
  bool InvalidOnQNaN;
  FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);

  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  if (CondCode2 != ARMCC::AL) {
    // Some FP conditions need two ARM condition codes: chain a second
    // conditional branch to the same destination, glued to the first.
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}

/// LowerBR_JT - Lower an ISD::BR_JT jump-table branch.  The entry address is
/// computed as table base + index * 4 (one 32-bit word per entry).
SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PTy = getPointerTy(DAG.getDataLayout());
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
  // Scale the index by 4: each jump-table entry is a 32-bit word.
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
  if (Subtarget->isThumb2() ||
      (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
    // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump
    // table which does another jump to the destination. This also makes it
    // easier to translate it to TBB / TBH later (Thumb2 only).
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI);
  }
  if (isPositionIndependent() || Subtarget->isROPI()) {
    // PIC/ROPI: load the 32-bit table entry and add it to the table base
    // (the entry is presumably a table-relative offset — confirm against the
    // jump-table emission code).
    Addr =
        DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  } else {
    // Static relocation model: the table entry is the destination address.
    Addr =
        DAG.getLoad(PTy, dl, Chain, Addr,
                    MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
  }
}

/// LowerVectorFP_TO_INT - Custom-lower a vector FP_TO_SINT/FP_TO_UINT.
/// f32->i32 vectors are left as-is (natively supported); f16 sources (with
/// FullFP16) convert to an i16 result via the matching integer vector type;
/// anything else is unrolled into scalar conversions.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getValueType().getVectorElementType() == MVT::i32) {
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  const bool HasFullFP16 =
      static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();

  // Pick the integer vector type matching the source element count.
  EVT NewTy;
  const EVT OpTy = Op.getOperand(0).getValueType();
  if (OpTy == MVT::v4f32)
    NewTy = MVT::v4i32;
  else if (OpTy == MVT::v4f16 && HasFullFP16)
    NewTy = MVT::v4i16;
  else if (OpTy == MVT::v8f16 && HasFullFP16)
    NewTy = MVT::v8i16;
  else
    llvm_unreachable("Invalid type for custom lowering!");

  if (VT != MVT::v4i16 && VT != MVT::v8i16)
    return DAG.UnrollVectorOp(Op.getNode());

  // Convert at NewTy, then truncate down to the requested result type.
  Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
}

/// LowerFP_TO_INT - Scalar FP_TO_SINT/FP_TO_UINT.  On FP-only-SP subtargets
/// an f64 source has no hardware conversion, so emit the corresponding
/// runtime library call instead; otherwise the node is already fine.
SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorFP_TO_INT(Op, DAG);
  if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
                       /*isSigned*/ false, SDLoc(Op)).first;
  }

  return Op;
}

/// LowerVectorINT_TO_FP - Custom-lower a vector SINT_TO_FP/UINT_TO_FP.
/// i32->f32 vectors are left as-is; v4i16/v8i16 sources are extended
/// (sign- or zero-, matching the opcode) to the integer type corresponding
/// to the FP result, then converted; anything else is unrolled.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
    if (VT.getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
          Op.getOperand(0).getValueType() == MVT::v8i16) &&
         "Invalid type for custom lowering!");

  const bool HasFullFP16 =
      static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();

  EVT DestVecType;
  if (VT == MVT::v4f32)
    DestVecType = MVT::v4i32;
  else if (VT == MVT::v4f16 && HasFullFP16)
    DestVecType = MVT::v4i16;
  else if (VT == MVT::v8f16 && HasFullFP16)
    DestVecType = MVT::v8i16;
  else
    return DAG.UnrollVectorOp(Op.getNode());

  unsigned CastOpc;
  unsigned Opc;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode!");
  case ISD::SINT_TO_FP:
    CastOpc = ISD::SIGN_EXTEND;
    Opc = ISD::SINT_TO_FP;
    break;
  case ISD::UINT_TO_FP:
    CastOpc = ISD::ZERO_EXTEND;
    Opc = ISD::UINT_TO_FP;
    break;
  }

  Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

/// LowerINT_TO_FP - Scalar SINT_TO_FP/UINT_TO_FP.  On FP-only-SP subtargets
/// an f64 result has no hardware conversion, so emit the corresponding
/// runtime library call instead; otherwise the node is already fine.
SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorINT_TO_FP(Op, DAG);
  if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
    RTLIB::Libcall LC;
    if (Op.getOpcode() == ISD::SINT_TO_FP)
      LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    else
      LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
                              Op.getValueType());
    return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
                       /*isSigned*/ false, SDLoc(Op)).first;
  }

  return Op;
}

/// LowerFCOPYSIGN - Lower ISD::FCOPYSIGN.  Two strategies: a NEON bit-select
/// of the sign bit when the magnitude operand is not already in GPRs, or a
/// plain integer mask-and-or on the (high) word otherwise.
SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDValue Tmp0 = Op.getOperand(0);  // Magnitude.
  SDValue Tmp1 = Op.getOperand(1);  // Sign source.
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  EVT SrcVT = Tmp1.getValueType();
  bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
               Tmp0.getOpcode() == ARMISD::VMOVDRR;
  bool UseNEON = !InGPR && Subtarget->hasNEON();

  if (UseNEON) {
    // Use VBSL to copy the sign bit.
    // Modified-immediate 0x80 shifted into the top byte: the f32 sign-bit
    // mask, one lane per 32-bit element.
    unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
    SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
                               DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
    EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
    if (VT == MVT::f64)
      // Shift the mask up into the high word of the 64-bit lane (the f64
      // sign bit lives in bit 63).
      Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                         DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
                         DAG.getConstant(32, dl, MVT::i32));
    else /*if (VT == MVT::f32)*/
      Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
    if (SrcVT == MVT::f32) {
      Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
      if (VT == MVT::f64)
        Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
                           DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
                           DAG.getConstant(32, dl, MVT::i32));
    } else if (VT == MVT::f32)
      // f64 sign source for an f32 result: move bit 63 down into bit 31.
      Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
                         DAG.getConstant(32, dl, MVT::i32));
    Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);

    SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
                                            dl, MVT::i32);
    AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
    SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
                                  DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));

    // Bit-select: sign bit from Tmp1, everything else from Tmp0.
    SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
                              DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
    if (VT == MVT::f32) {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
                        DAG.getConstant(0, dl, MVT::i32));
    } else {
      Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
    }

    return Res;
  }

  // Bitcast operand 1 to i32 (for f64, keep only the high word — it holds
  // the sign bit).
  if (SrcVT == MVT::f64)
    Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                       Tmp1).getValue(1);
  Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);

  // Or in the signbit with integer operations.
  SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);  // Sign bit.
  SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);  // Magnitude bits.
  Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
  if (VT == MVT::f32) {
    Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
                       DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                       DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
  }

  // f64: Or the high part with signbit and then combine two parts.
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}

/// LowerRETURNADDR - Lower @llvm.returnaddress.  Depth 0 reads LR (marked as
/// an implicit live-in); deeper frames load the saved return address at
/// frame address + 4.  Returns empty when the depth argument is not a
/// constant.
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    // The saved LR sits one word above the saved frame pointer.
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

/// LowerFRAMEADDR - Lower @llvm.frameaddress: start from the frame register
/// and chase one saved-frame-pointer load per level of requested depth.
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARMBaseRegisterInfo &ARI =
      *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = ARI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
/// getRegisterByName - Resolve a named register for read_register /
/// write_register intrinsics.  Only "sp" is supported; any other name is a
/// fatal error.
unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                              SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                     .Case("sp", ARM::SP)
                     .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error(Twine("Invalid register name \""
                           + StringRef(RegName) + "\"."));
}

// Result is 64 bit value so split into two 32 bit values and return as a
// pair of values.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {
  SDLoc DL(N);

  // This function is only supposed to be called for i64 type destination.
  assert(N->getValueType(0) == MVT::i64
         && "ExpandREAD_REGISTER called for non-i64 type result.");

  // Re-issue the READ_REGISTER with two i32 results plus a chain.
  SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
                             DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
                             N->getOperand(0),
                             N->getOperand(1));

  // Glue the halves back together as the i64 value result.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
                                Read.getValue(1)));
  // Chain result.  NOTE(review): this pushes the node's incoming chain
  // operand rather than Read's own chain value — confirm this is intended.
  Results.push_back(Read.getOperand(0));
}

/// \p BC is a bitcast that is about to be turned into a VMOVDRR.
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \p return The node that would replace \p BT, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {
  SDValue Op = BC->getOperand(0);
  EVT DstVT = BC->getValueType(0);

  // The only vector instruction that can produce a scalar (remember,
  // since the bitcast was about to be turned into VMOVDRR, the source
  // type is i64) from a vector is EXTRACT_VECTOR_ELT.
  // Moreover, we can do this combine only if there is one use.
  // Finally, if the destination type is not a vector, there is not
  // much point on forcing everything on the vector bank.
  if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !Op.hasOneUse())
    return SDValue();

  // If the index is not constant, we will introduce an additional
  // multiply that will stick.
  // Give up in that case.
  ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!Index)
    return SDValue();
  unsigned DstNumElt = DstVT.getVectorNumElements();

  // Compute the new index: each i64 element covers DstNumElt elements of the
  // destination element type, so scale the old index accordingly.
  const APInt &APIntIndex = Index->getAPIntValue();
  APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
  NewIndex *= APIntIndex;
  // Check if the new constant index fits into i32.
  if (NewIndex.getBitWidth() > 32)
    return SDValue();

  // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
  // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
  SDLoc dl(Op);
  SDValue ExtractSrc = Op.getOperand(0);
  EVT VecVT = EVT::getVectorVT(
      *DAG.getContext(), DstVT.getScalarType(),
      ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
  SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
                     DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
/// It also handles the f16-related bitcast patterns that arise from FullFP16
/// argument/return passing (see the inline DAG sketches below).
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i64 types, either as the
  // source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  const bool HasFullFP16 = Subtarget->hasFullFP16();

  if (SrcVT == MVT::f32 && DstVT == MVT::i32) {
    // FullFP16: half values are passed in S-registers, and we don't
    // need any of the bitcast and moves:
    //
    // t2: f32,ch = CopyFromReg t0, Register:f32 %0
    // t5: i32 = bitcast t2
    // t18: f16 = ARMISD::VMOVhr t5
    if (Op.getOpcode() != ISD::CopyFromReg ||
        Op.getValueType() != MVT::f32)
      return SDValue();

    // NOTE(review): assumes N has at least one use here — confirm callers
    // guarantee that before dereferencing use_begin().
    auto Move = N->use_begin();
    if (Move->getOpcode() != ARMISD::VMOVhr)
      return SDValue();

    // Re-issue the CopyFromReg directly as f16 and splice it over the move.
    SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
    SDValue Copy = DAG.getNode(ISD::CopyFromReg, SDLoc(Op), MVT::f16, Ops);
    DAG.ReplaceAllUsesWith(*Move, &Copy);
    return Copy;
  }

  if (SrcVT == MVT::i16 && DstVT == MVT::f16) {
    if (!HasFullFP16)
      return SDValue();
    // SoftFP: read half-precision arguments:
    //
    // t2: i32,ch = ...
    // t7: i16 = truncate t2 <~~~~ Op
    // t8: f16 = bitcast t7 <~~~~ N
    //
    if (Op.getOperand(0).getValueType() == MVT::i32)
      return DAG.getNode(ARMISD::VMOVhr, SDLoc(Op),
                         MVT::f16, Op.getOperand(0));

    return SDValue();
  }

  // Half-precision return values
  if (SrcVT == MVT::f16 && DstVT == MVT::i16) {
    if (!HasFullFP16)
      return SDValue();
    //
    // t11: f16 = fadd t8, t10
    // t12: i16 = bitcast t11 <~~~ SDNode N
    // t13: i32 = zero_extend t12
    // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t13
    // t17: ch = ARMISD::RET_FLAG t16, Register:i32 %r0, t16:1
    //
    // transform this into:
    //
    // t20: i32 = ARMISD::VMOVrh t11
    // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t20
    //
    auto ZeroExtend = N->use_begin();
    if (N->use_size() != 1 || ZeroExtend->getOpcode() != ISD::ZERO_EXTEND ||
        ZeroExtend->getValueType(0) != MVT::i32)
      return SDValue();

    auto Copy = ZeroExtend->use_begin();
    if (Copy->getOpcode() == ISD::CopyToReg &&
        Copy->use_begin()->getOpcode() == ARMISD::RET_FLAG) {
      SDValue Cvt = DAG.getNode(ARMISD::VMOVrh, SDLoc(Op), MVT::i32, Op);
      DAG.ReplaceAllUsesWith(*ZeroExtend, &Cvt);
      return Cvt;
    }
    return SDValue();
  }

  if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
    return SDValue();

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
    // if we can combine the bitcast with its source.
    if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
      return Val;

    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, dl, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, dl, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt;
    // Big-endian multi-element vectors need their 64-bit lanes reversed
    // before the move so the i64 halves come out in the right order.
    if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
        SrcVT.getVectorNumElements() > 1)
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32),
                        DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
    else
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32), Op);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction. However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
/// Also handles SRL_PARTS; each half is a CMOV between the "small shift"
/// (amount < 32) and "big shift" (amount >= 32) results, selected on the
/// sign of (amount - 32).
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  // Small shift: low = (lo >> amt) | (hi << (32 - amt)).
  SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  // Big shift: low comes entirely out of the high word.
  SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
                           ARMcc, CCR, CmpLo);

  SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  // Big shift: SRA fills the high word with sign bits, SRL with zeros.
  SDValue HiBigShift = Opc == ISD::SRA
                           ? DAG.getNode(Opc, dl, VT, ShOpHi,
                                         DAG.getConstant(VTBits - 1, dl, VT))
                           : DAG.getConstant(0, dl, VT);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and take a 2 x i32 value to shift plus a shift amount.
/// Mirror image of LowerShiftRightParts: each half is a CMOV between the
/// amount < 32 and amount >= 32 results.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  // Small shift: high = (hi << amt) | (lo >> (32 - amt)).
  SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);

  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  // Big shift: high comes entirely out of the low word.
  SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  // Big shift: the low word is all zeros.
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
                           DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerFLT_ROUNDS_ - Lower ISD::FLT_ROUNDS_ by reading the FPSCR.
SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3)
  // so that the shift + and get folded into a bitfield extract.
  SDLoc dl(Op);
  SDValue Ops[] = { DAG.getEntryNode(),
                    DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) };

  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                     DAG.getConstant(3, dl, MVT::i32));
}

/// LowerCTTZ - Lower CTTZ/CTTZ_ZERO_UNDEF.  Vector forms use the
/// lsb = x & -x trick on NEON; the scalar form uses RBIT + CLZ (v6T2+).
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector()) {
    assert(ST->hasNEON());

    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() ==
         ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)

    // Compute LSB - 1.
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load constant 0xffff'ffff'ffff'ffff to register (0x1eff is the NEON
      // modified-immediate encoding of all-ones i64 lanes); lsb - 1 is then
      // expressed as lsb + (-1).
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }
    return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
  }

  if (!ST->hasV6T2Ops())
    return SDValue();

  // Scalar: cttz(x) = ctlz(bitreverse(x)); RBIT and CLZ exist from v6T2 on.
  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

/// LowerCTPOP - Custom-lower vector CTPOP: count bits per byte with the
/// native v8i8/v16i8 popcount, then widen back to the requested element
/// size with pairwise-add-long (vpaddlu) steps.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
  Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);

  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
  unsigned EltSize = 8;
  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
  while (EltSize != VT.getScalarSizeInBits()) {
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
                                  TLI.getPointerTy(DAG.getDataLayout())));
    Ops.push_back(Res);

    // Each vpaddlu halves the element count and doubles the element size.
    EltSize *= 2;
    NumElts /= 2;
    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
    Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
  }

  return Res;
}

/// LowerShift - Custom-lower vector SHL/SRA/SRL to the NEON vshift
/// intrinsics.  Right shifts are expressed as left shifts by a negated
/// amount, which is how the NEON intrinsics encode them.
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  if (!VT.isVector())
    return SDValue();

  // Lower vector shifts on NEON to use VSHL.
  assert(ST->hasNEON() && "unexpected vector shift");

  // Left shifts translate directly to the vshiftu intrinsic.
  if (N->getOpcode() == ISD::SHL)
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
                                       MVT::i32),
                       N->getOperand(0), N->getOperand(1));

  assert((N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

  // NEON uses the same intrinsics for both left and right shifts. For
  // right shifts, the shift amounts are negative, so negate the vector of
  // shift amounts.
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                     getZeroVector(ShiftVT, DAG, dl),
                                     N->getOperand(1));
  Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                                 Intrinsic::arm_neon_vshifts :
                                 Intrinsic::arm_neon_vshiftu);
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                     DAG.getConstant(vshiftInt, dl, MVT::i32),
                     N->getOperand(0), NegatedCount);
}

/// Expand64BitShift - Expand an i64 SRA/SRL by exactly 1 into a shift of the
/// high word that captures the carry, plus an RRX on the low word that
/// shifts the carry back in.  All other cases defer to generic lowering.
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isOneConstant(N->getOperand(1)))
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, dl, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

/// LowerVSETCC - Lower a vector SETCC to the NEON compare nodes
/// (VCEQ/VCGT/VCGE and friends), tracking whether operands must be swapped
/// and/or the result inverted to express conditions NEON lacks directly.
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
      (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
    // Special-case integer 64-bit equality comparisons. They aren't legal,
    // but they can be lowered with a few vector instructions.
    // Compare as twice as many i32 lanes, then AND each lane with its
    // VREV64-swapped partner so a 64-bit lane is all-ones only when both
    // halves matched.
    unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
    EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
    SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
    SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
    SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
                              DAG.getCondCode(ISD::SETEQ));
    SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
    SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
    Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
    if (SetCCOpcode == ISD::SETNE)
      Merged = DAG.getNOT(dl, Merged, CmpVT);
    Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
    return Merged;
  }

  if (CmpVT.getVectorElementType() == MVT::i64)
    // 64-bit comparisons are not legal in general.
5578 return SDValue(); 5579 5580 if (Op1.getValueType().isFloatingPoint()) { 5581 switch (SetCCOpcode) { 5582 default: llvm_unreachable("Illegal FP comparison"); 5583 case ISD::SETUNE: 5584 case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH; 5585 case ISD::SETOEQ: 5586 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 5587 case ISD::SETOLT: 5588 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 5589 case ISD::SETOGT: 5590 case ISD::SETGT: Opc = ARMISD::VCGT; break; 5591 case ISD::SETOLE: 5592 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 5593 case ISD::SETOGE: 5594 case ISD::SETGE: Opc = ARMISD::VCGE; break; 5595 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; 5596 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 5597 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; 5598 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 5599 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; 5600 case ISD::SETONE: 5601 // Expand this to (OLT | OGT). 5602 TmpOp0 = Op0; 5603 TmpOp1 = Op1; 5604 Opc = ISD::OR; 5605 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); 5606 Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1); 5607 break; 5608 case ISD::SETUO: 5609 Invert = true; 5610 LLVM_FALLTHROUGH; 5611 case ISD::SETO: 5612 // Expand this to (OLT | OGE). 5613 TmpOp0 = Op0; 5614 TmpOp1 = Op1; 5615 Opc = ISD::OR; 5616 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); 5617 Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1); 5618 break; 5619 } 5620 } else { 5621 // Integer comparisons. 
5622 switch (SetCCOpcode) { 5623 default: llvm_unreachable("Illegal integer comparison"); 5624 case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH; 5625 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 5626 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 5627 case ISD::SETGT: Opc = ARMISD::VCGT; break; 5628 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 5629 case ISD::SETGE: Opc = ARMISD::VCGE; break; 5630 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH; 5631 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 5632 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH; 5633 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 5634 } 5635 5636 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 5637 if (Opc == ARMISD::VCEQ) { 5638 SDValue AndOp; 5639 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 5640 AndOp = Op0; 5641 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 5642 AndOp = Op1; 5643 5644 // Ignore bitconvert. 5645 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 5646 AndOp = AndOp.getOperand(0); 5647 5648 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 5649 Opc = ARMISD::VTST; 5650 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); 5651 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); 5652 Invert = !Invert; 5653 } 5654 } 5655 } 5656 5657 if (Swap) 5658 std::swap(Op0, Op1); 5659 5660 // If one of the operands is a constant vector zero, attempt to fold the 5661 // comparison to a specialized compare-against-zero form. 
5662 SDValue SingleOp; 5663 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 5664 SingleOp = Op0; 5665 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 5666 if (Opc == ARMISD::VCGE) 5667 Opc = ARMISD::VCLEZ; 5668 else if (Opc == ARMISD::VCGT) 5669 Opc = ARMISD::VCLTZ; 5670 SingleOp = Op1; 5671 } 5672 5673 SDValue Result; 5674 if (SingleOp.getNode()) { 5675 switch (Opc) { 5676 case ARMISD::VCEQ: 5677 Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break; 5678 case ARMISD::VCGE: 5679 Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break; 5680 case ARMISD::VCLEZ: 5681 Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break; 5682 case ARMISD::VCGT: 5683 Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break; 5684 case ARMISD::VCLTZ: 5685 Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break; 5686 default: 5687 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); 5688 } 5689 } else { 5690 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); 5691 } 5692 5693 Result = DAG.getSExtOrTrunc(Result, dl, VT); 5694 5695 if (Invert) 5696 Result = DAG.getNOT(dl, Result, VT); 5697 5698 return Result; 5699 } 5700 5701 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { 5702 SDValue LHS = Op.getOperand(0); 5703 SDValue RHS = Op.getOperand(1); 5704 SDValue Carry = Op.getOperand(2); 5705 SDValue Cond = Op.getOperand(3); 5706 SDLoc DL(Op); 5707 5708 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only."); 5709 5710 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we 5711 // have to invert the carry first. 5712 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32, 5713 DAG.getConstant(1, DL, MVT::i32), Carry); 5714 // This converts the boolean value carry into the carry flag. 
5715 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG); 5716 5717 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 5718 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); 5719 5720 SDValue FVal = DAG.getConstant(0, DL, MVT::i32); 5721 SDValue TVal = DAG.getConstant(1, DL, MVT::i32); 5722 SDValue ARMcc = DAG.getConstant( 5723 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); 5724 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5725 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, 5726 Cmp.getValue(1), SDValue()); 5727 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, 5728 CCR, Chain.getValue(1)); 5729 } 5730 5731 /// isNEONModifiedImm - Check if the specified splat value corresponds to a 5732 /// valid vector constant for a NEON instruction with a "modified immediate" 5733 /// operand (e.g., VMOV). If so, return the encoded value. 5734 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 5735 unsigned SplatBitSize, SelectionDAG &DAG, 5736 const SDLoc &dl, EVT &VT, bool is128Bits, 5737 NEONModImmType type) { 5738 unsigned OpCmode, Imm; 5739 5740 // SplatBitSize is set to the smallest size that splats the vector, so a 5741 // zero vector will always have SplatBitSize == 8. However, NEON modified 5742 // immediate instructions others than VMOV do not support the 8-bit encoding 5743 // of a zero vector, and the default encoding of zero is supposed to be the 5744 // 32-bit version. 5745 if (SplatBits == 0) 5746 SplatBitSize = 32; 5747 5748 switch (SplatBitSize) { 5749 case 8: 5750 if (type != VMOVModImm) 5751 return SDValue(); 5752 // Any 1-byte value is OK. Op=0, Cmode=1110. 5753 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 5754 OpCmode = 0xe; 5755 Imm = SplatBits; 5756 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 5757 break; 5758 5759 case 16: 5760 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 
5761 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 5762 if ((SplatBits & ~0xff) == 0) { 5763 // Value = 0x00nn: Op=x, Cmode=100x. 5764 OpCmode = 0x8; 5765 Imm = SplatBits; 5766 break; 5767 } 5768 if ((SplatBits & ~0xff00) == 0) { 5769 // Value = 0xnn00: Op=x, Cmode=101x. 5770 OpCmode = 0xa; 5771 Imm = SplatBits >> 8; 5772 break; 5773 } 5774 return SDValue(); 5775 5776 case 32: 5777 // NEON's 32-bit VMOV supports splat values where: 5778 // * only one byte is nonzero, or 5779 // * the least significant byte is 0xff and the second byte is nonzero, or 5780 // * the least significant 2 bytes are 0xff and the third is nonzero. 5781 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 5782 if ((SplatBits & ~0xff) == 0) { 5783 // Value = 0x000000nn: Op=x, Cmode=000x. 5784 OpCmode = 0; 5785 Imm = SplatBits; 5786 break; 5787 } 5788 if ((SplatBits & ~0xff00) == 0) { 5789 // Value = 0x0000nn00: Op=x, Cmode=001x. 5790 OpCmode = 0x2; 5791 Imm = SplatBits >> 8; 5792 break; 5793 } 5794 if ((SplatBits & ~0xff0000) == 0) { 5795 // Value = 0x00nn0000: Op=x, Cmode=010x. 5796 OpCmode = 0x4; 5797 Imm = SplatBits >> 16; 5798 break; 5799 } 5800 if ((SplatBits & ~0xff000000) == 0) { 5801 // Value = 0xnn000000: Op=x, Cmode=011x. 5802 OpCmode = 0x6; 5803 Imm = SplatBits >> 24; 5804 break; 5805 } 5806 5807 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 5808 if (type == OtherModImm) return SDValue(); 5809 5810 if ((SplatBits & ~0xffff) == 0 && 5811 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 5812 // Value = 0x0000nnff: Op=x, Cmode=1100. 5813 OpCmode = 0xc; 5814 Imm = SplatBits >> 8; 5815 break; 5816 } 5817 5818 if ((SplatBits & ~0xffffff) == 0 && 5819 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 5820 // Value = 0x00nnffff: Op=x, Cmode=1101. 
5821 OpCmode = 0xd; 5822 Imm = SplatBits >> 16; 5823 break; 5824 } 5825 5826 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 5827 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 5828 // VMOV.I32. A (very) minor optimization would be to replicate the value 5829 // and fall through here to test for a valid 64-bit splat. But, then the 5830 // caller would also need to check and handle the change in size. 5831 return SDValue(); 5832 5833 case 64: { 5834 if (type != VMOVModImm) 5835 return SDValue(); 5836 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 5837 uint64_t BitMask = 0xff; 5838 uint64_t Val = 0; 5839 unsigned ImmMask = 1; 5840 Imm = 0; 5841 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 5842 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 5843 Val |= BitMask; 5844 Imm |= ImmMask; 5845 } else if ((SplatBits & BitMask) != 0) { 5846 return SDValue(); 5847 } 5848 BitMask <<= 8; 5849 ImmMask <<= 1; 5850 } 5851 5852 if (DAG.getDataLayout().isBigEndian()) 5853 // swap higher and lower 32 bit word 5854 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4); 5855 5856 // Op=1, Cmode=1110. 5857 OpCmode = 0x1e; 5858 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 5859 break; 5860 } 5861 5862 default: 5863 llvm_unreachable("unexpected size for isNEONModifiedImm"); 5864 } 5865 5866 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 5867 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); 5868 } 5869 5870 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 5871 const ARMSubtarget *ST) const { 5872 EVT VT = Op.getValueType(); 5873 bool IsDouble = (VT == MVT::f64); 5874 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 5875 const APFloat &FPVal = CFP->getValueAPF(); 5876 5877 // Prevent floating-point constants from using literal loads 5878 // when execute-only is enabled. 
5879 if (ST->genExecuteOnly()) { 5880 // If we can represent the constant as an immediate, don't lower it 5881 if (isFPImmLegal(FPVal, VT)) 5882 return Op; 5883 // Otherwise, construct as integer, and move to float register 5884 APInt INTVal = FPVal.bitcastToAPInt(); 5885 SDLoc DL(CFP); 5886 switch (VT.getSimpleVT().SimpleTy) { 5887 default: 5888 llvm_unreachable("Unknown floating point type!"); 5889 break; 5890 case MVT::f64: { 5891 SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32); 5892 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32); 5893 if (!ST->isLittle()) 5894 std::swap(Lo, Hi); 5895 return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi); 5896 } 5897 case MVT::f32: 5898 return DAG.getNode(ARMISD::VMOVSR, DL, VT, 5899 DAG.getConstant(INTVal, DL, MVT::i32)); 5900 } 5901 } 5902 5903 if (!ST->hasVFP3()) 5904 return SDValue(); 5905 5906 // Use the default (constant pool) lowering for double constants when we have 5907 // an SP-only FPU 5908 if (IsDouble && Subtarget->isFPOnlySP()) 5909 return SDValue(); 5910 5911 // Try splatting with a VMOV.f32... 5912 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 5913 5914 if (ImmVal != -1) { 5915 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 5916 // We have code in place to select a valid ConstantFP already, no need to 5917 // do any mangling. 5918 return Op; 5919 } 5920 5921 // It's a float and we are trying to use NEON operations where 5922 // possible. Lower it to a splat followed by an extract. 5923 SDLoc DL(Op); 5924 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); 5925 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 5926 NewVal); 5927 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 5928 DAG.getConstant(0, DL, MVT::i32)); 5929 } 5930 5931 // The rest of our options are NEON only, make sure that's allowed before 5932 // proceeding.. 
5933 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 5934 return SDValue(); 5935 5936 EVT VMovVT; 5937 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 5938 5939 // It wouldn't really be worth bothering for doubles except for one very 5940 // important value, which does happen to match: 0.0. So make sure we don't do 5941 // anything stupid. 5942 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) 5943 return SDValue(); 5944 5945 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). 5946 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), 5947 VMovVT, false, VMOVModImm); 5948 if (NewVal != SDValue()) { 5949 SDLoc DL(Op); 5950 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 5951 NewVal); 5952 if (IsDouble) 5953 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5954 5955 // It's a float: cast and extract a vector element. 5956 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 5957 VecConstant); 5958 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 5959 DAG.getConstant(0, DL, MVT::i32)); 5960 } 5961 5962 // Finally, try a VMVN.i32 5963 NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, 5964 false, VMVNModImm); 5965 if (NewVal != SDValue()) { 5966 SDLoc DL(Op); 5967 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 5968 5969 if (IsDouble) 5970 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5971 5972 // It's a float: cast and extract a vector element. 5973 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 5974 VecConstant); 5975 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 5976 DAG.getConstant(0, DL, MVT::i32)); 5977 } 5978 5979 return SDValue(); 5980 } 5981 5982 // check if an VEXT instruction can handle the shuffle mask when the 5983 // vector sources of the shuffle are the same. 
5984 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 5985 unsigned NumElts = VT.getVectorNumElements(); 5986 5987 // Assume that the first shuffle index is not UNDEF. Fail if it is. 5988 if (M[0] < 0) 5989 return false; 5990 5991 Imm = M[0]; 5992 5993 // If this is a VEXT shuffle, the immediate value is the index of the first 5994 // element. The other shuffle indices must be the successive elements after 5995 // the first one. 5996 unsigned ExpectedElt = Imm; 5997 for (unsigned i = 1; i < NumElts; ++i) { 5998 // Increment the expected index. If it wraps around, just follow it 5999 // back to index zero and keep going. 6000 ++ExpectedElt; 6001 if (ExpectedElt == NumElts) 6002 ExpectedElt = 0; 6003 6004 if (M[i] < 0) continue; // ignore UNDEF indices 6005 if (ExpectedElt != static_cast<unsigned>(M[i])) 6006 return false; 6007 } 6008 6009 return true; 6010 } 6011 6012 static bool isVEXTMask(ArrayRef<int> M, EVT VT, 6013 bool &ReverseVEXT, unsigned &Imm) { 6014 unsigned NumElts = VT.getVectorNumElements(); 6015 ReverseVEXT = false; 6016 6017 // Assume that the first shuffle index is not UNDEF. Fail if it is. 6018 if (M[0] < 0) 6019 return false; 6020 6021 Imm = M[0]; 6022 6023 // If this is a VEXT shuffle, the immediate value is the index of the first 6024 // element. The other shuffle indices must be the successive elements after 6025 // the first one. 6026 unsigned ExpectedElt = Imm; 6027 for (unsigned i = 1; i < NumElts; ++i) { 6028 // Increment the expected index. If it wraps around, it may still be 6029 // a VEXT but the source vectors must be swapped. 6030 ExpectedElt += 1; 6031 if (ExpectedElt == NumElts * 2) { 6032 ExpectedElt = 0; 6033 ReverseVEXT = true; 6034 } 6035 6036 if (M[i] < 0) continue; // ignore UNDEF indices 6037 if (ExpectedElt != static_cast<unsigned>(M[i])) 6038 return false; 6039 } 6040 6041 // Adjust the index value if the source operands will be swapped. 
6042 if (ReverseVEXT) 6043 Imm -= NumElts; 6044 6045 return true; 6046 } 6047 6048 /// isVREVMask - Check if a vector shuffle corresponds to a VREV 6049 /// instruction with the specified blocksize. (The order of the elements 6050 /// within each block of the vector is reversed.) 6051 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 6052 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 6053 "Only possible block sizes for VREV are: 16, 32, 64"); 6054 6055 unsigned EltSz = VT.getScalarSizeInBits(); 6056 if (EltSz == 64) 6057 return false; 6058 6059 unsigned NumElts = VT.getVectorNumElements(); 6060 unsigned BlockElts = M[0] + 1; 6061 // If the first shuffle index is UNDEF, be optimistic. 6062 if (M[0] < 0) 6063 BlockElts = BlockSize / EltSz; 6064 6065 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 6066 return false; 6067 6068 for (unsigned i = 0; i < NumElts; ++i) { 6069 if (M[i] < 0) continue; // ignore UNDEF indices 6070 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 6071 return false; 6072 } 6073 6074 return true; 6075 } 6076 6077 static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 6078 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 6079 // range, then 0 is placed into the resulting vector. So pretty much any mask 6080 // of 8 elements can work here. 6081 return VT == MVT::v8i8 && M.size() == 8; 6082 } 6083 6084 static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask, 6085 unsigned Index) { 6086 if (Mask.size() == Elements * 2) 6087 return Index / Elements; 6088 return Mask[Index] == 0 ? 0 : 1; 6089 } 6090 6091 // Checks whether the shuffle mask represents a vector transpose (VTRN) by 6092 // checking that pairs of elements in the shuffle mask represent the same index 6093 // in each vector, incrementing the expected index by 2 at each step. 6094 // e.g. 
For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] 6095 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} 6096 // v2={e,f,g,h} 6097 // WhichResult gives the offset for each element in the mask based on which 6098 // of the two results it belongs to. 6099 // 6100 // The transpose can be represented either as: 6101 // result1 = shufflevector v1, v2, result1_shuffle_mask 6102 // result2 = shufflevector v1, v2, result2_shuffle_mask 6103 // where v1/v2 and the shuffle masks have the same number of elements 6104 // (here WhichResult (see below) indicates which result is being checked) 6105 // 6106 // or as: 6107 // results = shufflevector v1, v2, shuffle_mask 6108 // where both results are returned in one vector and the shuffle mask has twice 6109 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we 6110 // want to check the low half and high half of the shuffle mask as if it were 6111 // the other case 6112 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 6113 unsigned EltSz = VT.getScalarSizeInBits(); 6114 if (EltSz == 64) 6115 return false; 6116 6117 unsigned NumElts = VT.getVectorNumElements(); 6118 if (M.size() != NumElts && M.size() != NumElts*2) 6119 return false; 6120 6121 // If the mask is twice as long as the input vector then we need to check the 6122 // upper and lower parts of the mask with a matching value for WhichResult 6123 // FIXME: A mask with only even values will be rejected in case the first 6124 // element is undefined, e.g. 
[-1, 4, 2, 6] will be rejected, because only 6125 // M[0] is used to determine WhichResult 6126 for (unsigned i = 0; i < M.size(); i += NumElts) { 6127 WhichResult = SelectPairHalf(NumElts, M, i); 6128 for (unsigned j = 0; j < NumElts; j += 2) { 6129 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 6130 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) 6131 return false; 6132 } 6133 } 6134 6135 if (M.size() == NumElts*2) 6136 WhichResult = 0; 6137 6138 return true; 6139 } 6140 6141 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 6142 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 6143 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 6144 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 6145 unsigned EltSz = VT.getScalarSizeInBits(); 6146 if (EltSz == 64) 6147 return false; 6148 6149 unsigned NumElts = VT.getVectorNumElements(); 6150 if (M.size() != NumElts && M.size() != NumElts*2) 6151 return false; 6152 6153 for (unsigned i = 0; i < M.size(); i += NumElts) { 6154 WhichResult = SelectPairHalf(NumElts, M, i); 6155 for (unsigned j = 0; j < NumElts; j += 2) { 6156 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 6157 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) 6158 return false; 6159 } 6160 } 6161 6162 if (M.size() == NumElts*2) 6163 WhichResult = 0; 6164 6165 return true; 6166 } 6167 6168 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking 6169 // that the mask elements are either all even and in steps of size 2 or all odd 6170 // and in steps of size 2. 6171 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] 6172 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} 6173 // v2={e,f,g,h} 6174 // Requires similar checks to that of isVTRNMask with 6175 // respect the how results are returned. 
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  // Each defined element must select lane 2*j (+WhichResult) of the
  // concatenated sources; double-length masks are checked one half at a
  // time with that half's own WhichResult.
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; ++j) {
      if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
        return false;
    }
  }

  // A double-length mask describes both results at once, so report 0.
  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  // With a single source the unzipped sequence repeats after NumElts/2
  // entries, so check the mask half-vector by half-vector.
  unsigned Half = NumElts / 2;
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += Half) {
      unsigned Idx = WhichResult;
      for (unsigned k = 0; k < Half; ++k) {
        int MIdx = M[i + j + k];
        if (MIdx >= 0 && (unsigned) MIdx != Idx)
          return false;
        Idx += 2;
      }
    }
  }

  // A double-length mask describes both results at once, so report 0.
  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

// Checks whether the shuffle mask represents a vector zip (VZIP) by checking
// that pairs of elements of the shufflemask represent the same index in each
// vector incrementing sequentially through the vectors.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
//      v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
//      v2={e,f,g,h}
// Requires similar checks to that of isVTRNMask with respect the how results
// are returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    // The first result zips the low halves, the second the high halves.
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
        return false;
      Idx += 1;
    }
  }

  // A double-length mask describes both results at once, so report 0.
  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      // With a single source, both lanes of each pair pull the same index.
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
        return false;
      Idx += 1;
    }
  }

  // A double-length mask describes both results at once, so report 0.
  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
/// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
                                           unsigned &WhichResult,
                                           bool &isV_UNDEF) {
  isV_UNDEF = false;
  if (isVTRNMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  // Fall back to the single-source ("v, undef") canonical forms.
  isV_UNDEF = true;
  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  return 0;
}

/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
    return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, const SDLoc &dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    // Thumb1 MOV/MVN only take an 8-bit immediate.
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, dl, MVT::i32);
  } else {
    // ARM/Thumb2: check the shifter-operand immediate encodings for the
    // value or its complement (MOV or MVN).
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, dl, MVT::i32);
  }
  return SDValue();
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatUndef.isAllOnesValue())
      return DAG.getUNDEF(VT);

    if (SplatBitSize <= 64) {
      // Check if an immediate VMOV works.
6386 EVT VmovVT; 6387 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6388 SplatUndef.getZExtValue(), SplatBitSize, 6389 DAG, dl, VmovVT, VT.is128BitVector(), 6390 VMOVModImm); 6391 if (Val.getNode()) { 6392 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 6393 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 6394 } 6395 6396 // Try an immediate VMVN. 6397 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 6398 Val = isNEONModifiedImm(NegatedImm, 6399 SplatUndef.getZExtValue(), SplatBitSize, 6400 DAG, dl, VmovVT, VT.is128BitVector(), 6401 VMVNModImm); 6402 if (Val.getNode()) { 6403 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 6404 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 6405 } 6406 6407 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 6408 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 6409 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 6410 if (ImmVal != -1) { 6411 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); 6412 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 6413 } 6414 } 6415 } 6416 } 6417 6418 // Scan through the operands to see if only one value is used. 6419 // 6420 // As an optimisation, even if more than one value is used it may be more 6421 // profitable to splat with one value then change some lanes. 6422 // 6423 // Heuristically we decide to do this if the vector has a "dominant" value, 6424 // defined as splatted to more than half of the lanes. 6425 unsigned NumElts = VT.getVectorNumElements(); 6426 bool isOnlyLowElement = true; 6427 bool usesOnlyOneValue = true; 6428 bool hasDominantValue = false; 6429 bool isConstant = true; 6430 6431 // Map of the number of times a particular SDValue appears in the 6432 // element list. 
6433 DenseMap<SDValue, unsigned> ValueCounts; 6434 SDValue Value; 6435 for (unsigned i = 0; i < NumElts; ++i) { 6436 SDValue V = Op.getOperand(i); 6437 if (V.isUndef()) 6438 continue; 6439 if (i > 0) 6440 isOnlyLowElement = false; 6441 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 6442 isConstant = false; 6443 6444 ValueCounts.insert(std::make_pair(V, 0)); 6445 unsigned &Count = ValueCounts[V]; 6446 6447 // Is this value dominant? (takes up more than half of the lanes) 6448 if (++Count > (NumElts / 2)) { 6449 hasDominantValue = true; 6450 Value = V; 6451 } 6452 } 6453 if (ValueCounts.size() != 1) 6454 usesOnlyOneValue = false; 6455 if (!Value.getNode() && !ValueCounts.empty()) 6456 Value = ValueCounts.begin()->first; 6457 6458 if (ValueCounts.empty()) 6459 return DAG.getUNDEF(VT); 6460 6461 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. 6462 // Keep going if we are hitting this case. 6463 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 6464 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 6465 6466 unsigned EltSize = VT.getScalarSizeInBits(); 6467 6468 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 6469 // i32 and try again. 6470 if (hasDominantValue && EltSize <= 32) { 6471 if (!isConstant) { 6472 SDValue N; 6473 6474 // If we are VDUPing a value that comes directly from a vector, that will 6475 // cause an unnecessary move to and from a GPR, where instead we could 6476 // just use VDUPLANE. We can only do this if the lane being extracted 6477 // is at a constant index, as the VDUP from lane instructions only have 6478 // constant-index forms. 
6479 ConstantSDNode *constIndex; 6480 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 6481 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { 6482 // We need to create a new undef vector to use for the VDUPLANE if the 6483 // size of the vector from which we get the value is different than the 6484 // size of the vector that we need to create. We will insert the element 6485 // such that the register coalescer will remove unnecessary copies. 6486 if (VT != Value->getOperand(0).getValueType()) { 6487 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 6488 VT.getVectorNumElements(); 6489 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6490 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 6491 Value, DAG.getConstant(index, dl, MVT::i32)), 6492 DAG.getConstant(index, dl, MVT::i32)); 6493 } else 6494 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6495 Value->getOperand(0), Value->getOperand(1)); 6496 } else 6497 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 6498 6499 if (!usesOnlyOneValue) { 6500 // The dominant value was splatted as 'N', but we now have to insert 6501 // all differing elements. 
6502 for (unsigned I = 0; I < NumElts; ++I) { 6503 if (Op.getOperand(I) == Value) 6504 continue; 6505 SmallVector<SDValue, 3> Ops; 6506 Ops.push_back(N); 6507 Ops.push_back(Op.getOperand(I)); 6508 Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); 6509 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); 6510 } 6511 } 6512 return N; 6513 } 6514 if (VT.getVectorElementType().isFloatingPoint()) { 6515 SmallVector<SDValue, 8> Ops; 6516 for (unsigned i = 0; i < NumElts; ++i) 6517 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 6518 Op.getOperand(i))); 6519 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 6520 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); 6521 Val = LowerBUILD_VECTOR(Val, DAG, ST); 6522 if (Val.getNode()) 6523 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6524 } 6525 if (usesOnlyOneValue) { 6526 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 6527 if (isConstant && Val.getNode()) 6528 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 6529 } 6530 } 6531 6532 // If all elements are constants and the case above didn't get hit, fall back 6533 // to the default expansion, which will generate a load from the constant 6534 // pool. 6535 if (isConstant) 6536 return SDValue(); 6537 6538 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 6539 if (NumElts >= 4) { 6540 SDValue shuffle = ReconstructShuffle(Op, DAG); 6541 if (shuffle != SDValue()) 6542 return shuffle; 6543 } 6544 6545 if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) { 6546 // If we haven't found an efficient lowering, try splitting a 128-bit vector 6547 // into two 64-bit vectors; we might discover a better way to lower it. 
6548 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts); 6549 EVT ExtVT = VT.getVectorElementType(); 6550 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2); 6551 SDValue Lower = 6552 DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2)); 6553 if (Lower.getOpcode() == ISD::BUILD_VECTOR) 6554 Lower = LowerBUILD_VECTOR(Lower, DAG, ST); 6555 SDValue Upper = DAG.getBuildVector( 6556 HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2)); 6557 if (Upper.getOpcode() == ISD::BUILD_VECTOR) 6558 Upper = LowerBUILD_VECTOR(Upper, DAG, ST); 6559 if (Lower && Upper) 6560 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper); 6561 } 6562 6563 // Vectors with 32- or 64-bit elements can be built by directly assigning 6564 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 6565 // will be legalized. 6566 if (EltSize >= 32) { 6567 // Do the expansion with floating-point types, since that is what the VFP 6568 // registers are defined to use, and since i64 is not legal. 6569 EVT EltVT = EVT::getFloatingPointVT(EltSize); 6570 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 6571 SmallVector<SDValue, 8> Ops; 6572 for (unsigned i = 0; i < NumElts; ++i) 6573 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 6574 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 6575 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6576 } 6577 6578 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 6579 // know the default expansion would otherwise fall back on something even 6580 // worse. For a vector with one or two non-undef values, that's 6581 // scalar_to_vector for the elements followed by a shuffle (provided the 6582 // shuffle is valid for the target) and materialization element by element 6583 // on the stack followed by a load for everything else. 
6584 if (!isConstant && !usesOnlyOneValue) { 6585 SDValue Vec = DAG.getUNDEF(VT); 6586 for (unsigned i = 0 ; i < NumElts; ++i) { 6587 SDValue V = Op.getOperand(i); 6588 if (V.isUndef()) 6589 continue; 6590 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); 6591 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); 6592 } 6593 return Vec; 6594 } 6595 6596 return SDValue(); 6597 } 6598 6599 // Gather data to see if the operation can be modelled as a 6600 // shuffle in combination with VEXTs. 6601 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 6602 SelectionDAG &DAG) const { 6603 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 6604 SDLoc dl(Op); 6605 EVT VT = Op.getValueType(); 6606 unsigned NumElts = VT.getVectorNumElements(); 6607 6608 struct ShuffleSourceInfo { 6609 SDValue Vec; 6610 unsigned MinElt = std::numeric_limits<unsigned>::max(); 6611 unsigned MaxElt = 0; 6612 6613 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to 6614 // be compatible with the shuffle we intend to construct. As a result 6615 // ShuffleVec will be some sliding window into the original Vec. 6616 SDValue ShuffleVec; 6617 6618 // Code should guarantee that element i in Vec starts at element "WindowBase 6619 // + i * WindowScale in ShuffleVec". 6620 int WindowBase = 0; 6621 int WindowScale = 1; 6622 6623 ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {} 6624 6625 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } 6626 }; 6627 6628 // First gather all vectors used as an immediate source for this BUILD_VECTOR 6629 // node. 6630 SmallVector<ShuffleSourceInfo, 2> Sources; 6631 for (unsigned i = 0; i < NumElts; ++i) { 6632 SDValue V = Op.getOperand(i); 6633 if (V.isUndef()) 6634 continue; 6635 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 6636 // A shuffle can only come from building a vector from various 6637 // elements of other vectors. 
      return SDValue();
    } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
      // Furthermore, shuffles require a constant mask, whereas extractelts
      // accept variable indices.
      return SDValue();
    }

    // Add this element source to the list if it's not already there.
    SDValue SourceVec = V.getOperand(0);
    auto Source = llvm::find(Sources, SourceVec);
    if (Source == Sources.end())
      Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));

    // Update the minimum and maximum lane number seen.
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    Source->MinElt = std::min(Source->MinElt, EltNo);
    Source->MaxElt = std::max(Source->MaxElt, EltNo);
  }

  // Currently only do something sane when at most two source vectors
  // are involved.
  if (Sources.size() > 2)
    return SDValue();

  // Find out the smallest element size among result and two sources, and use
  // it as element size to build the shuffle_vector.
  EVT SmallestEltTy = VT.getVectorElementType();
  for (auto &Source : Sources) {
    EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
    if (SrcEltTy.bitsLT(SmallestEltTy))
      SmallestEltTy = SrcEltTy;
  }
  unsigned ResMultiplier =
      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
  NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
  EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);

  // If the source vector is too wide or too narrow, we may nevertheless be able
  // to construct a compatible shuffle either by concatenating it with UNDEF or
  // extracting a suitable range of elements.
  for (auto &Src : Sources) {
    EVT SrcVT = Src.ShuffleVec.getValueType();

    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
      continue;

    // This stage of the search produces a source with the same element type as
    // the original, but with a total width matching the BUILD_VECTOR output.
    EVT EltVT = SrcVT.getVectorElementType();
    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
    EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
        return SDValue();
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle...
      Src.ShuffleVec =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
                      DAG.getUNDEF(Src.ShuffleVec.getValueType()));
      continue;
    }

    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
      return SDValue();

    if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
      // Span too large for a VEXT to cope
      return SDValue();
    }

    if (Src.MinElt >= NumSrcElts) {
      // The extraction can just take the second half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));
      Src.WindowBase = -NumSrcElts;
    } else if (Src.MaxElt < NumSrcElts) {
      // The extraction can just take the first half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
    } else {
      // An actual VEXT is needed
      SDValue VEXTSrc1 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
      SDValue VEXTSrc2 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));

      Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
                                   VEXTSrc2,
                                   DAG.getConstant(Src.MinElt, dl, MVT::i32));
      Src.WindowBase = -Src.MinElt;
    }
  }

  // Another possible incompatibility occurs from the vector element types. We
  // can fix this by bitcasting the source vectors to the same type we intend
  // for the shuffle.
  for (auto &Src : Sources) {
    EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
    if (SrcEltTy == SmallestEltTy)
      continue;
    assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
    Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
    Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
    Src.WindowBase *= Src.WindowScale;
  }

  // Final sanity check before we try to actually produce a shuffle.
  LLVM_DEBUG(for (auto Src
                  : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););

  // The stars all align, our next step is to produce the mask for the shuffle.
  SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.isUndef())
      continue;

    auto Src = llvm::find(Sources, Entry.getOperand(0));
    int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();

    // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
    // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
    // segment.
    EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
    int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
                               VT.getScalarSizeInBits());
    int LanesDefined = BitsDefined / BitsPerShuffleLane;

    // This source is expected to fill ResMultiplier lanes of the final shuffle,
    // starting at the appropriate offset.
    int *LaneMask = &Mask[i * ResMultiplier];

    // Translate through the sliding window, then offset into the second
    // shuffle operand's lane range when this entry comes from Sources[1].
    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
    ExtractBase += NumElts * (Src - Sources.begin());
    for (int j = 0; j < LanesDefined; ++j)
      LaneMask[j] = ExtractBase + j;
  }

  // Final check before we try to produce nonsense...
  if (!isShuffleMaskLegal(Mask, ShuffleVT))
    return SDValue();

  // We can't handle more than two sources. This should have already
  // been checked before this point.
  assert(Sources.size() <= 2 && "Too many sources!");

  SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
  for (unsigned i = 0; i < Sources.size(); ++i)
    ShuffleOps[i] = Sources[i].ShuffleVec;

  SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
                                         ShuffleOps[1], Mask);
  return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    // 8 encodes an undef lane in the perfect-shuffle table (indices 0-7 are
    // real lanes of the two operands).
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  bool ReverseVEXT, isV_UNDEF;
  unsigned Imm, WhichResult;

  unsigned EltSize = VT.getScalarSizeInBits();
  return (EltSize >= 32 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isVREVMask(M, VT, 64) ||
          isVREVMask(M, VT, 32) ||
          isVREVMask(M, VT, 16) ||
          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
          isVTBLMask(M, VT) ||
          isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) ||
          ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  // Entry layout: [31:30] cost, [29:26] opcode, [25:13] LHS table id,
  // [12:0] RHS table id (decoded recursively below).
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VREV,
    OP_VDUP0,
    OP_VDUP1,
    OP_VDUP2,
    OP_VDUP3,
    OP_VEXT1,
    OP_VEXT2,
    OP_VEXT3,
    OP_VUZPL, // VUZP, left result
    OP_VUZPR, // VUZP, right result
    OP_VZIPL, // VZIP, left result
    OP_VZIPR, // VZIP, right result
    OP_VTRNL, // VTRN, left result
    OP_VTRNR  // VTRN, right result
  };

  if (OpNum == OP_COPY) {
    // Base-9 encodings of <0,1,2,3> (the LHS identity) and <4,5,6,7> (RHS).
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

/// Lower a v8i8 shuffle via the NEON table-lookup instructions: VTBL1 when
/// only one input is live, VTBL2 otherwise.
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       ArrayRef<int> ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);

  SmallVector<SDValue, 8> VTBLMask;
  for (ArrayRef<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));

  if (V2.getNode()->isUndef())
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
}

/// Lower a full-reverse shuffle of v8i16/v16i8 as VREV64 followed by a VEXT
/// that swaps the two double words.
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
                                                      SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue OpLHS = Op.getOperand(0);
  EVT VT = OpLHS.getValueType();

  assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
         "Expect an v8i16/v16i8 type");
  OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
  // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
  // extract the first 8 bytes into the top double word and the last 8 bytes
  // into the bottom double word. The v8i16 case is similar.
  unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
  return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
                     DAG.getConstant(ExtractNum, DL, MVT::i32));
}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same type so that they get CSEd properly.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  unsigned EltSize = VT.getScalarSizeInBits();
  if (EltSize <= 32) {
    if (SVN->isSplat()) {
      int Lane = SVN->getSplatIndex();
      // If this is undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      // Test if V1 is a SCALAR_TO_VECTOR.
      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
      // (and probably will turn into a SCALAR_TO_VECTOR once legalization
      // reaches it).
      if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
          !isa<ConstantSDNode>(V1.getOperand(0))) {
        bool IsScalarToVector = true;
        // Equivalent to SCALAR_TO_VECTOR only if every lane past the first
        // is undef.
        for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
          if (!V1.getOperand(i).isUndef()) {
            IsScalarToVector = false;
            break;
          }
        if (IsScalarToVector)
          return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, dl, MVT::i32));
    }

    bool ReverseVEXT;
    unsigned Imm;
    if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    // Rotation of a single vector: VEXT with the same operand twice.
    if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
                         DAG.getConstant(Imm, dl, MVT::i32));
    }

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
    unsigned WhichResult;
    bool isV_UNDEF;
    if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
            ShuffleMask, VT, WhichResult, isV_UNDEF)) {
      if (isV_UNDEF)
        V2 = V1;
      return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
          .getValue(WhichResult);
    }

    // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
    // shuffles that produce a result larger than their operands with:
    //   shuffle(concat(v1, undef), concat(v2, undef))
    // ->
    //   shuffle(concat(v1, v2), undef)
    // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
    //
    // This is useful in the general case, but there are special cases where
    // native shuffles produce larger results: the two-result ops.
    //
    // Look through the concat when lowering them:
    //   shuffle(concat(v1, v2), undef)
    // ->
    //   concat(VZIP(v1, v2):0, :1)
    //
    if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
      SDValue SubV1 = V1->getOperand(0);
      SDValue SubV2 = V1->getOperand(1);
      EVT SubVT = SubV1.getValueType();

      // We expect these to have been canonicalized to -1.
      assert(llvm::all_of(ShuffleMask, [&](int i) {
        return i < (int)VT.getVectorNumElements();
      }) && "Unexpected shuffle index into UNDEF operand!");

      if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
              ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
        if (isV_UNDEF)
          SubV2 = SubV1;
        assert((WhichResult == 0) &&
               "In-place shuffle of concat can only have one result!");
        SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
                                  SubV1, SubV2);
        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
                           Res.getValue(1));
      }
    }
  }

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (ShuffleMask[i] < 0)
        Ops.push_back(DAG.getUNDEF(EltVT));
      else
        // Mask index selects the operand (< NumElts -> V1); the low bits give
        // the lane within that operand.
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
                                                  dl, MVT::i32)));
    }
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
    return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);

  if (VT == MVT::v8i8)
    if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
      return NewOp;

  return SDValue();
}

static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  // INSERT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(2);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  return Op;
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(1);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  // Narrow lanes extracted to i32 use the unsigned get-lane form (VGETLANEu),
  // which zero-extends the element into the GPR.
  if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
    SDLoc dl(Op);
    return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  }

  return Op;
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
7156 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 7157 "unexpected CONCAT_VECTORS"); 7158 SDLoc dl(Op); 7159 SDValue Val = DAG.getUNDEF(MVT::v2f64); 7160 SDValue Op0 = Op.getOperand(0); 7161 SDValue Op1 = Op.getOperand(1); 7162 if (!Op0.isUndef()) 7163 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 7164 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 7165 DAG.getIntPtrConstant(0, dl)); 7166 if (!Op1.isUndef()) 7167 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 7168 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 7169 DAG.getIntPtrConstant(1, dl)); 7170 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 7171 } 7172 7173 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 7174 /// element has been zero/sign-extended, depending on the isSigned parameter, 7175 /// from an integer type half its size. 7176 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 7177 bool isSigned) { 7178 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 7179 EVT VT = N->getValueType(0); 7180 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 7181 SDNode *BVN = N->getOperand(0).getNode(); 7182 if (BVN->getValueType(0) != MVT::v4i32 || 7183 BVN->getOpcode() != ISD::BUILD_VECTOR) 7184 return false; 7185 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 
1 : 0; 7186 unsigned HiElt = 1 - LoElt; 7187 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 7188 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 7189 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 7190 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 7191 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 7192 return false; 7193 if (isSigned) { 7194 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 7195 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 7196 return true; 7197 } else { 7198 if (Hi0->isNullValue() && Hi1->isNullValue()) 7199 return true; 7200 } 7201 return false; 7202 } 7203 7204 if (N->getOpcode() != ISD::BUILD_VECTOR) 7205 return false; 7206 7207 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 7208 SDNode *Elt = N->getOperand(i).getNode(); 7209 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 7210 unsigned EltSize = VT.getScalarSizeInBits(); 7211 unsigned HalfSize = EltSize / 2; 7212 if (isSigned) { 7213 if (!isIntN(HalfSize, C->getSExtValue())) 7214 return false; 7215 } else { 7216 if (!isUIntN(HalfSize, C->getZExtValue())) 7217 return false; 7218 } 7219 continue; 7220 } 7221 return false; 7222 } 7223 7224 return true; 7225 } 7226 7227 /// isSignExtended - Check if a node is a vector value that is sign-extended 7228 /// or a constant BUILD_VECTOR with sign-extended elements. 7229 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 7230 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 7231 return true; 7232 if (isExtendedBUILD_VECTOR(N, DAG, true)) 7233 return true; 7234 return false; 7235 } 7236 7237 /// isZeroExtended - Check if a node is a vector value that is zero-extended 7238 /// or a constant BUILD_VECTOR with zero-extended elements. 
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  // Either an explicit zext, a zero-extending load, or a constant
  // BUILD_VECTOR whose elements all fit in the low half of each lane.
  if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, false))
    return true;
  return false;
}

/// getExtensionTo64Bits - Return the 64-bit vector type that \p OrigVT
/// widens to (same element count, doubled element size). Types that are
/// already 64 bits or wider are returned unchanged.
static EVT getExtensionTo64Bits(const EVT &OrigVT) {
  if (OrigVT.getSizeInBits() >= 64)
    return OrigVT;

  assert(OrigVT.isSimple() && "Expecting a simple value type");

  MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
  switch (OrigSimpleTy) {
  default: llvm_unreachable("Unexpected Vector Type");
  case MVT::v2i8:
  case MVT::v2i16:
     return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
  }
}

/// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
/// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
/// We insert the required extension here to get the vector to fill a D register.
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
                                            const EVT &OrigTy,
                                            const EVT &ExtTy,
                                            unsigned ExtOpcode) {
  // The vector originally had a size of OrigTy. It was then extended to ExtTy.
  // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
  // 64-bits we need to insert a new extension so that it will be 64-bits.
  assert(ExtTy.is128BitVector() && "Unexpected extension size");
  if (OrigTy.getSizeInBits() >= 64)
    return N;

  // Must extend size to at least 64 bits to be used as an operand for VMULL.
  EVT NewVT = getExtensionTo64Bits(OrigTy);

  return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
}

/// SkipLoadExtensionForVMULL - return a load of the original vector size that
/// does not do any sign/zero extension. If the original vector is less
/// than 64 bits, an appropriate extension will be added after the load to
/// reach a total size of 64 bits. We have to add the extension separately
/// because ARM does not have a sign/zero extending load for vectors.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
  EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());

  // The load already has the right type (>= 64 bits in memory): re-emit it
  // as a plain, non-extending load of the memory type.
  if (ExtendedTy == LD->getMemoryVT())
    return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
                       LD->getBasePtr(), LD->getPointerInfo(),
                       LD->getAlignment(), LD->getMemOperand()->getFlags());

  // We need to create a zextload/sextload. We cannot just create a load
  // followed by a sext/zext node because LowerMUL is also run during normal
  // operation legalization where we can't create illegal types.
  return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
                        LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
                        LD->getMemoryVT(), LD->getAlignment(),
                        LD->getMemOperand()->getFlags());
}

/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
/// extending load, or BUILD_VECTOR with extended elements, return the
/// unextended value. The unextended vector should be 64 bits so that it can
/// be used as an operand to a VMULL instruction. If the original vector size
/// before extension is less than 64 bits we add an extension to resize
/// the vector to 64 bits.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
  // An explicit extension: drop it, re-extending the source only as far as
  // needed to fill a 64-bit D register.
  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
    return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
                                        N->getOperand(0)->getValueType(0),
                                        N->getValueType(0),
                                        N->getOpcode());

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
           "Expected extending load");

    SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
    // Rewire the old extending load's chain to the new load, and replace its
    // value result with an explicit extension of the new load so that any
    // other users of the original load still see the extended value.
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
    unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    SDValue extLoad =
        DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);

    return newLoad;
  }

  // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
  // have been legalized as a BITCAST from v4i32.
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    // Keep only the low 32 bits of each 64-bit lane.
    return DAG.getBuildVector(
        MVT::v2i32, SDLoc(N),
        {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getScalarSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  SDLoc dl(N);
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}

/// isAddSubSExt - Check if \p N is an ADD or SUB whose operands are both
/// sign-extended and each have a single use, so the add/sub can be folded
/// into a pair of VMULLs (see LowerMUL).
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

/// isAddSubZExt - Check if \p N is an ADD or SUB whose operands are both
/// zero-extended and each have a single use.
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

/// LowerMUL - Lower an ISD::MUL of a 128-bit integer vector, recognizing
/// widening multiplies of extended 64-bit operands as VMULLs/VMULLu
/// (optionally distributed over an add/sub to form vmull+vmlal).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        // Canonicalize the add/sub into N0 so the code below only handles
        // one shape.
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this. It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtensionForVMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //  vmull q0, d4, d6
  //  vmlal q0, d5, d6
  // is faster than
  //  vaddl q0, d4, d5
  //  vmovl q1, d6
  //  vmul  q0, q0, q1
  SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

/// LowerSDIV_v4i8 - Lower a v4 signed divide of i8-range values (passed
/// already widened to v4i16) via a NEON reciprocal-estimate sequence,
/// returning a v4i16 result.
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
                              SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  // Convert to float
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                  DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                  Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any newton steps. This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
  return X;
}

/// LowerSDIV_v4i16 - Lower a v4i16 signed divide via a NEON
/// reciprocal-estimate sequence with one Newton refinement step.
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
                               SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  SDValue N2;
  // Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and one refinement step.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single newton step. This requires that we use a weird bias
  // of 89, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

/// LowerSDIV - Custom-lower ISD::SDIV for v4i16 and v8i8, splitting v8i8
/// into two v4 halves handled by LowerSDIV_v4i8.
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::SDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);

    // Split each widened operand into its high (N2/N3) and low (N0/N1)
    // v4i16 halves and divide each half separately.
    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
    return N0;
  }
  return LowerSDIV_v4i16(N0, N1, dl, DAG);
}

/// LowerUDIV - Custom-lower ISD::UDIV for v4i16 and v8i8. The v8i8 halves
/// fit in the signed i16 range, so they reuse the signed v4i16 divide; the
/// v4i16 case uses a reciprocal estimate with two refinement steps.
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    // Zero-extended i8 values fit in signed i16, so the signed divide is
    // exact here.
    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
                                     MVT::i32),
                     N0);
    return N0;
  }

  // v4i16 udiv ... Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and two refinement steps.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   BN1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

/// LowerADDSUBCARRY - Lower ISD::ADDCARRY/SUBCARRY to ARMISD::ADDE/SUBE,
/// converting between ISD's boolean carry/borrow and ARM's flags-based
/// carry representation.
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
  SDNode *N = Op.getNode();
  EVT VT = N->getValueType(0);
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  SDValue Carry = Op.getOperand(2);

  SDLoc DL(Op);

  SDValue Result;
  if (Op.getOpcode() == ISD::ADDCARRY) {
    // This converts the boolean value carry into the carry flag.
    Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

    // Do the addition proper using the carry flag we wanted.
    Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
                         Op.getOperand(1), Carry);

    // Now convert the carry flag into a boolean value.
    Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
  } else {
    // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
    // have to invert the carry first.
    Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                        DAG.getConstant(1, DL, MVT::i32), Carry);
    // This converts the boolean value carry into the carry flag.
    Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

    // Do the subtraction proper using the carry flag we wanted.
    Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
                         Op.getOperand(1), Carry);

    // Now convert the carry flag into a boolean value.
    Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
    // But the carry returned by ARMISD::SUBE is not a borrow as expected
    // by ISD::SUBCARRY, so compute 1 - C.
    Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                        DAG.getConstant(1, DL, MVT::i32), Carry);
  }

  // Return both values.
  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
}

/// LowerFSINCOS - Lower ISD::FSINCOS to a call to the Darwin
/// __sincos_stret entry point; on APCS targets the pair result is
/// returned via an sret stack slot and loaded back out.
SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin());

  // For iOS, we want to call an alternative entry point: __sincos_stret,
  // return values are passed via sret.
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Pair of floats / doubles used to pass the result.
  Type *RetTy = StructType::get(ArgTy, ArgTy);
  auto &DL = DAG.getDataLayout();

  ArgListTy Args;
  bool ShouldUseSRet = Subtarget->isAPCS_ABI();
  SDValue SRet;
  if (ShouldUseSRet) {
    // Create stack object for sret.
    const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
    const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
    int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
    SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));

    ArgListEntry Entry;
    Entry.Node = SRet;
    Entry.Ty = RetTy->getPointerTo();
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsSRet = true;
    Args.push_back(Entry);
    // The call itself returns void; the results come back through SRet.
    RetTy = Type::getVoidTy(*DAG.getContext());
  }

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  RTLIB::Libcall LC =
      (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
  const char *LibcallName = getLibcallName(LC);
  CallingConv::ID CC = getLibcallCallingConv(LC);
  SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setCallee(CC, RetTy, Callee, std::move(Args))
      .setDiscardResult(ShouldUseSRet);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  if (!ShouldUseSRet)
    return CallResult.first;

  // Load the sin value from the start of the sret slot.
  SDValue LoadSin =
      DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());

  // Address of cos field.
  SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
                            DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
  SDValue LoadCos =
      DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());

  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
                     LoadSin.getValue(0), LoadCos.getValue(0));
}

/// LowerWindowsDIVLibCall - Emit a call to the Windows RT division helper
/// (__rt_sdiv/__rt_udiv or their 64-bit variants). Note the helpers take
/// (divisor, dividend) order, hence the reversed argument loop.
SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
                                                  bool Signed,
                                                  SDValue &Chain) const {
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  const char *Name = nullptr;
  if (Signed)
    Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
  else
    Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";

  SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));

  ARMTargetLowering::ArgListTy Args;

  // Divisor first, then dividend.
  for (auto AI : {1, 0}) {
    ArgListEntry Arg;
    Arg.Node = Op.getOperand(AI);
    Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Arg);
  }

  CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
                 ES, std::move(Args));

  return LowerCallTo(CLI).first;
}

/// LowerDIV_Windows - Lower a 32-bit SDIV/UDIV on Windows: guard with a
/// divide-by-zero check, then call the RT division helper.
SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
                                            bool Signed) const {
  assert(Op.getValueType() == MVT::i32 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
                               DAG.getEntryNode(), Op.getOperand(1));

  return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
}

/// WinDBZCheckDenominator - Emit an ARMISD::WIN__DBZCHK on the divisor
/// (operand 1 of N). For a 64-bit divisor, the check is performed on the
/// OR of its two 32-bit halves (zero iff the whole value is zero).
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N,
                                      SDValue InChain) {
  SDLoc DL(N);
  SDValue Op = N->getOperand(1);
  if (N->getValueType(0) == MVT::i32)
    return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(1, DL, MVT::i32));
  return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
                     DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
}

/// ExpandDIV_Windows - Expand a 64-bit SDIV/UDIV on Windows into a
/// divide-by-zero check plus an RT helper call, splitting the i64 result
/// into two i32 values for the legalizer.
void ARMTargetLowering::ExpandDIV_Windows(
    SDValue Op, SelectionDAG &DAG, bool Signed,
    SmallVectorImpl<SDValue> &Results) const {
  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Op.getValueType() == MVT::i64 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());

  SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);

  SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
  SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
                              DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
  Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);

  Results.push_back(Lower);
  Results.push_back(Upper);
}

/// LowerAtomicLoadStore - Monotonic atomic load/store is legal as-is;
/// anything stronger must be expanded (targets without dmb cannot
/// implement acquire/release semantics directly).
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Acquire/Release load/store is not legal for targets without a dmb or
    // equivalent available.
    return SDValue();

  // Monotonic load/store is legal for all targets.
  return Op;
}

/// ReplaceREADCYCLECOUNTER - Expand READCYCLECOUNTER into an mrc read of
/// the PMCCNTR register, zero-extended to the i64 the node is defined to
/// produce.
static void ReplaceREADCYCLECOUNTER(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) {
  SDLoc DL(N);
  // Under Power Management extensions, the cycle-count is:
  //   mrc p15, #0, <Rt>, c9, c13, #0
  SDValue Ops[] = { N->getOperand(0), // Chain
                    DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                    DAG.getConstant(15, DL, MVT::i32),
                    DAG.getConstant(0, DL, MVT::i32),
                    DAG.getConstant(9, DL, MVT::i32),
                    DAG.getConstant(13, DL, MVT::i32),
                    DAG.getConstant(0, DL, MVT::i32)
  };

  SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                 DAG.getVTList(MVT::i32, MVT::Other), Ops);
  // The counter is only 32 bits; pair it with zero for the i64 result.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
                                DAG.getConstant(0, DL, MVT::i32)));
  Results.push_back(Cycles32.getValue(1));
}

/// createGPRPairNode - Pack an i64 value into an untyped GPRPair register
/// via REG_SEQUENCE, swapping the halves on big-endian targets.
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
  SDLoc dl(V.getNode());
  SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
  SDValue VHi = DAG.getAnyExtOrTrunc(
      DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
      dl, MVT::i32);
  bool isBigEndian = DAG.getDataLayout().isBigEndian();
  if (isBigEndian)
    std::swap (VLo, VHi);
  SDValue RegClass =
      DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
  return SDValue(
      DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
}

/// ReplaceCMP_SWAP_64Results - Expand a 64-bit ATOMIC_CMP_SWAP into the
/// CMP_SWAP_64 pseudo, extracting the i32 halves of the GPRPair result.
static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> & Results,
                                      SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types less than 64 should be legal");
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  bool isBigEndian = DAG.getDataLayout().isBigEndian();

  // Low half first, then high half (subregister order flips on BE).
  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(SDValue(CmpSwap, 2));
}

/// LowerFPOWI - Lower ISD::FPOWI to a call to powf/pow (MSVCRT has no
/// __powisf2/__powidf2); the integer exponent is converted to FP first.
static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
                          SelectionDAG &DAG) {
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
         "Custom lowering is MSVCRT specific!");

  SDLoc dl(Op);
  SDValue Val = Op.getOperand(0);
  MVT Ty = Val->getSimpleValueType(0);
  SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
  SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
                                         TLI.getPointerTy(DAG.getDataLayout()));

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Val;
  Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
  Entry.IsZExt = true;
  Args.push_back(Entry);

  Entry.Node = Exponent;
  Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
  Entry.IsZExt = true;
  Args.push_back(Entry);

  Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());

  // The in-chain to the call is the entry node. If we are emitting a
  // tailcall, the chain will be mutated if the node has a non-entry input
  // chain.
  SDValue InChain = DAG.getEntryNode();
  SDValue TCChain = InChain;

  const Function &F = DAG.getMachineFunction().getFunction();
  bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
              F.getReturnType() == LCRTy;
  if (IsTC)
    InChain = TCChain;

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
      .setTailCall(IsTC);
  std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);

  // Return the chain (the DAG root) if it is a tail call
  return !CI.second.getNode() ? DAG.getRoot() : CI.first;
}

/// LowerOperation - Dispatch custom lowering for every opcode this target
/// marks Custom.
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SREM: return LowerREM(Op.getNode(), DAG);
  case ISD::UREM: return LowerREM(Op.getNode(), DAG);
  case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
  case ISD::SETCC: return LowerVSETCC(Op, DAG);
  case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
  case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::MUL: return LowerMUL(Op, DAG);
  case ISD::SDIV:
    // Windows scalar divides go through the RT helper; vector divides use
    // the NEON reciprocal sequence.
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ true);
    return LowerSDIV(Op, DAG);
  case ISD::UDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ false);
    return LowerUDIV(Op, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
    return LowerSignedALUO(Op, DAG);
  case ISD::UADDO:
  case ISD::USUBO:
    return LowerUnsignedALUO(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM: return LowerDivRem(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    if (Subtarget->isTargetWindows())
      return LowerDYNAMIC_STACKALLOC(Op, DAG);
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
  case ARMISD::WIN__DBZCHK: return SDValue();
  }
}

/// ReplaceLongIntrinsic - Map the 64-bit-accumulator arm.smlald family of
/// intrinsics onto the corresponding ARMISD nodes, splitting the i64
/// accumulator into two i32 halves. Unknown intrinsics are left untouched.
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  unsigned Opc = 0;
  if (IntNo == Intrinsic::arm_smlald)
    Opc = ARMISD::SMLALD;
  else if (IntNo == Intrinsic::arm_smlaldx)
    Opc = ARMISD::SMLALDX;
  else if (IntNo == Intrinsic::arm_smlsld)
    Opc = ARMISD::SMLSLD;
  else if (IntNo == Intrinsic::arm_smlsldx)
    Opc = ARMISD::SMLSLDX;
  else
    return;

  SDLoc dl(N);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(1, dl, MVT::i32));

  SDValue LongMul = DAG.getNode(Opc, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                N->getOperand(1), N->getOperand(2),
                                Lo, Hi);
  Results.push_back(LongMul.getValue(0));
  Results.push_back(LongMul.getValue(1));
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
8091 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 8092 SmallVectorImpl<SDValue> &Results, 8093 SelectionDAG &DAG) const { 8094 SDValue Res; 8095 switch (N->getOpcode()) { 8096 default: 8097 llvm_unreachable("Don't know how to custom expand this!"); 8098 case ISD::READ_REGISTER: 8099 ExpandREAD_REGISTER(N, Results, DAG); 8100 break; 8101 case ISD::BITCAST: 8102 Res = ExpandBITCAST(N, DAG, Subtarget); 8103 break; 8104 case ISD::SRL: 8105 case ISD::SRA: 8106 Res = Expand64BitShift(N, DAG, Subtarget); 8107 break; 8108 case ISD::SREM: 8109 case ISD::UREM: 8110 Res = LowerREM(N, DAG); 8111 break; 8112 case ISD::SDIVREM: 8113 case ISD::UDIVREM: 8114 Res = LowerDivRem(SDValue(N, 0), DAG); 8115 assert(Res.getNumOperands() == 2 && "DivRem needs two values"); 8116 Results.push_back(Res.getValue(0)); 8117 Results.push_back(Res.getValue(1)); 8118 return; 8119 case ISD::READCYCLECOUNTER: 8120 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 8121 return; 8122 case ISD::UDIV: 8123 case ISD::SDIV: 8124 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); 8125 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, 8126 Results); 8127 case ISD::ATOMIC_CMP_SWAP: 8128 ReplaceCMP_SWAP_64Results(N, Results, DAG); 8129 return; 8130 case ISD::INTRINSIC_WO_CHAIN: 8131 return ReplaceLongIntrinsic(N, Results, DAG); 8132 } 8133 if (Res.getNode()) 8134 Results.push_back(Res); 8135 } 8136 8137 //===----------------------------------------------------------------------===// 8138 // ARM Scheduler Hooks 8139 //===----------------------------------------------------------------------===// 8140 8141 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 8142 /// registers the function context. 
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported with SjLj");
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
  const Function &F = MF->getFunction();

  bool isThumb = Subtarget->isThumb();
  bool isThumb2 = Subtarget->isThumb2();

  // Constant-pool entry holding the address of DispatchBB, referenced
  // PC-relatively. PCAdj compensates for the pipeline bias of the PC read
  // (Thumb reads PC+4, ARM reads PC+8).
  unsigned PCLabelId = AFI->createPICLabelUId();
  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
  unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);

  const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;

  // Grab constant pool and fixed stack memory operands.
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, 4);

  MachineMemOperand *FIMMOSt =
      MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                               MachineMemOperand::MOStore, 4, 4);

  // Load the address of the dispatch MBB into the jump buffer.
  if (isThumb2) {
    // Incoming value: jbuf
    //   ldr.n  r5, LCPI1_1
    //   orr    r5, r5, #1
    //   add    r5, pc
    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    // Set the low bit because of thumb mode.
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(0x01)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
        .addReg(NewVReg2, RegState::Kill)
        .addImm(PCLabelId);
    // Store into slot 36 of the jump buffer: &jbuf[1] (the saved pc).
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
        .addReg(NewVReg3, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36)  // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else if (isThumb) {
    // Incoming value: jbuf
    //   ldr.n  r1, LCPI1_4
    //   add    r1, pc
    //   mov    r2, #1
    //   orrs   r1, r2
    //   add    r2, $jbuf, #+4 ; &jbuf[1]
    //   str    r1, [r2]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId);
    // Set the low bit because of thumb mode.
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
        .addReg(ARM::CPSR, RegState::Define)
        .addImm(1)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3, RegState::Kill)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
        .addFrameIndex(FI)
        .addImm(36);  // &jbuf[1] :: pc
    BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg5, RegState::Kill)
        .addImm(0)
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else {
    // Incoming value: jbuf
    //   ldr  r1, LCPI1_1
    //   add  r1, pc, r1
    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addImm(0)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId)
        .add(predOps(ARMCC::AL));
    BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
        .addReg(NewVReg2, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36)  // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  }
}

/// Expand the pseudo SjLj dispatch instruction: build the dispatch and trap
/// blocks, the landing-pad jump table, and rewire invoke successors.
void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                              MachineBasicBlock *MBB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  int FI = MFI.getFunctionContextIndex();

  const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
                                                        : &ARM::GPRnopcRegClass;

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
       ++BB) {
    if (!BB->isEHPad()) continue;

    // FIXME: We should assert that the EH_LABEL is the first MI in the landing
    // pad.
    for (MachineBasicBlock::iterator
           II = BB->begin(), IE = BB->end(); II != IE; ++II) {
      if (!II->isEHLabel()) continue;

      MCSymbol *Sym = II->getOperand(0).getMCSymbol();
      if (!MF->hasCallSiteLandingPad(Sym)) continue;

      SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
      for (SmallVectorImpl<unsigned>::iterator
             CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
           CSI != CSE; ++CSI) {
        CallSiteNumToLPad[*CSI].push_back(&*BB);
        MaxCSNum = std::max(MaxCSNum, *CSI);
      }
      // Only the first EH_LABEL of each pad is considered.
      break;
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  // Call-site numbers start at 1; index 0 is never used.
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      // Predecessors of landing pads are the invoke blocks.
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
      MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad();

  // Out-of-range call-site index lands in the trap block.
  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert the new MBBs at the end of the function.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers. This results in all
  // registers being marked as clobbered. This can't work if the dispatch block
  // is in a Thumb1 function and is linked with ARM code which uses the FP
  // registers, as there is no way to preserve the FP registers in Thumb1 mode.
  MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));

  bool IsPositionIndependent = isPositionIndependent();
  unsigned NumLPads = LPadList.size();
  if (Subtarget->isThumb2()) {
    // Load the call-site index from the function context (offset 4),
    // bounds-check it against NumLPads, then jump through the table.
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
          .addReg(NewVReg1)
          .addImm(LPadList.size())
          .add(predOps(ARMCC::AL));
    } else {
      // NumLPads doesn't fit the 8-bit immediate; materialize it with
      // movw (+ movt for the high half when needed) and compare registers.
      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    }

    // Index above the table size -> trap.
    BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
        .addMBB(TrapBB)
        .addImm(ARMCC::HI)
        .addReg(ARM::CPSR);

    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    // Table entry address = table base + index * 4.
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg1)
        .addJumpTableIndex(MJTI);
  } else if (Subtarget->isThumb()) {
    // Thumb1: tLDRspi scales its immediate by 4, so offset 1 == byte 4.
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
        .addFrameIndex(FI)
        .addImm(1)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else {
      // Thumb1 has no movw/movt; load the bound from the constant pool.
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
      if (Align == 0)
        Align = MF->getDataLayout().getTypeAllocSize(C->getType());
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
          .addReg(NewVReg1)
          .addReg(VReg1)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
        .addMBB(TrapBB)
        .addImm(ARMCC::HI)
        .addReg(ARM::CPSR);

    // Scale the index by 4 (entry size), add the table base, then load the
    // destination address from the inline jump table.
    unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg1)
        .addImm(2)
        .add(predOps(ARMCC::AL));

    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
        MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);

    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
        .addReg(NewVReg4, RegState::Kill)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    // In PIC mode the table holds offsets, so add the table base back in.
    unsigned NewVReg6 = NewVReg5;
    if (IsPositionIndependent) {
      NewVReg6 = MRI->createVirtualRegister(TRC);
      BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
          .addReg(ARM::CPSR, RegState::Define)
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg3)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
        .addReg(NewVReg6, RegState::Kill)
        .addJumpTableIndex(MJTI);
  } else {
    // ARM mode.
    unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
      // movw is available and the bound fits 16 bits.
      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    } else {
      // Fall back to a constant-pool load of the bound.
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
      if (Align == 0)
        Align = MF->getDataLayout().getTypeAllocSize(C->getType());
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

      unsigned VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg1, RegState::Kill)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
        .addMBB(TrapBB)
        .addImm(ARMCC::HI)
        .addReg(ARM::CPSR);

    // index * 4, table base, then load the entry.
    unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
        MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
    unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg4)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    if (IsPositionIndependent) {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg4)
          .addJumpTableIndex(MJTI);
    } else {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
          .addReg(NewVReg5, RegState::Kill)
          .addJumpTableIndex(MJTI);
    }
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
  for (std::vector<MachineBasicBlock*>::iterator
         I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
    MachineBasicBlock *CurMBB = *I;
    if (SeenMBBs.insert(CurMBB).second)
      DispContBB->addSuccessor(CurMBB);
  }

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
  SmallVector<MachineBasicBlock*, 64> MBBLPads;
  for (MachineBasicBlock *BB : InvokeBBs) {

    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
                                                  BB->succ_end());
    while (!Successors.empty()) {
      MachineBasicBlock *SMBB = Successors.pop_back_val();
      if (SMBB->isEHPad()) {
        BB->removeSuccessor(SMBB);
        MBBLPads.push_back(SMBB);
      }
    }

    BB->addSuccessor(DispatchBB, BranchProbability::getZero());
    BB->normalizeSuccProbs();

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (MachineBasicBlock::reverse_iterator
           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
      if (!II->isCall()) continue;

      // Record register operands already present on the call so we don't
      // add duplicate implicit defs below.
      DenseMap<unsigned, bool> DefRegs;
      for (MachineInstr::mop_iterator
             OI = II->operands_begin(), OE = II->operands_end();
           OI != OE; ++OI) {
        if (!OI->isReg()) continue;
        DefRegs[OI->getReg()] = true;
      }

      MachineInstrBuilder MIB(*MF, &*II);

      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
        unsigned Reg = SavedRegs[i];
        // Restrict to the register classes usable in the current ISA mode:
        // Thumb2 -> tGPR/hGPR, Thumb1 -> tGPR only, ARM -> GPR.
        if (Subtarget->isThumb2() &&
            !ARM::tGPRRegClass.contains(Reg) &&
            !ARM::hGPRRegClass.contains(Reg))
          continue;
        if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
          continue;
        if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
          continue;
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      // Only the innermost (last) call in the block needs the markers.
      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (SmallVectorImpl<MachineBasicBlock*>::iterator
         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
    (*I)->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
}

/// Return the unique successor of \p MBB that is not \p Succ. Asserts that
/// \p MBB has exactly one such successor.
static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}

/// Return the load opcode for a given load size. If load size >= 8,
/// neon opcode will be returned.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
  if (LdSize >= 8)
    return LdSize == 16 ? ARM::VLD1q32wb_fixed
                        : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
  if (IsThumb1)
    return LdSize == 4 ? ARM::tLDRi
                       : LdSize == 2 ? ARM::tLDRHi
                       : LdSize == 1 ? ARM::tLDRBi : 0;
  if (IsThumb2)
    return LdSize == 4 ? ARM::t2LDR_POST
                       : LdSize == 2 ? ARM::t2LDRH_POST
                       : LdSize == 1 ? ARM::t2LDRB_POST : 0;
  return LdSize == 4 ? ARM::LDR_POST_IMM
                     : LdSize == 2 ? ARM::LDRH_POST
                     : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
}

/// Return the store opcode for a given store size. If store size >= 8,
/// neon opcode will be returned.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
  if (StSize >= 8)
    return StSize == 16 ? ARM::VST1q32wb_fixed
                        : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
  if (IsThumb1)
    return StSize == 4 ? ARM::tSTRi
                       : StSize == 2 ? ARM::tSTRHi
                       : StSize == 1 ? ARM::tSTRBi : 0;
  if (IsThumb2)
    return StSize == 4 ? ARM::t2STR_POST
                       : StSize == 2 ? ARM::t2STRH_POST
                       : StSize == 1 ? ARM::t2STRB_POST : 0;
  return StSize == 4 ? ARM::STR_POST_IMM
                     : StSize == 2 ? ARM::STRH_POST
                     : StSize == 1 ? ARM::STRB_POST_IMM : 0;
}

/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
  assert(LdOpc != 0 && "Should have a load opcode");
  if (LdSize >= 8) {
    // NEON VLD1 with fixed writeback: AddrOut receives the updated pointer.
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // Thumb1 has no post-increment loads: load + separate add of AddrIn.
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addReg(0)  // no offset register
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  }
}

/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
  assert(StOpc != 0 && "Should have a store opcode");
  if (StSize >= 8) {
    // NEON VST1 with fixed writeback: AddrOut receives the updated pointer.
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(AddrIn)
        .addImm(0)
        .addReg(Data)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // Thumb1 has no post-increment stores: store + separate add of AddrIn.
    BuildMI(*BB, Pos, dl, TII->get(StOpc))
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addReg(0)  // no offset register
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  }
}

MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {
  // This pseudo instruction has 3 operands: dst, src, size
  // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
  // Otherwise, we will generate unrolled scalar copies.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  unsigned dest = MI.getOperand(0).getReg();
  unsigned src = MI.getOperand(1).getReg();
  unsigned SizeVal = MI.getOperand(2).getImm();
  unsigned Align = MI.getOperand(3).getImm();
  DebugLoc dl = MI.getDebugLoc();

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnitSize = 0;
  const TargetRegisterClass *TRC = nullptr;
  const TargetRegisterClass *VecTRC = nullptr;

  bool IsThumb1 = Subtarget->isThumb1Only();
  bool IsThumb2 = Subtarget->isThumb2();
  bool IsThumb = Subtarget->isThumb();

  // Pick the widest copy unit the alignment allows.
  if (Align & 1) {
    UnitSize = 1;
  } else if (Align & 2) {
    UnitSize = 2;
  } else {
    // Check whether we can use NEON instructions.
    if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
        Subtarget->hasNEON()) {
      if ((Align % 16 == 0) && SizeVal >= 16)
        UnitSize = 16;
      else if ((Align % 8 == 0) && SizeVal >= 8)
        UnitSize = 8;
    }
    // Can't use NEON instructions.
    if (UnitSize == 0)
      UnitSize = 4;
  }

  // Select the correct opcode and register class for unit size load/store
  bool IsNeon = UnitSize >= 8;
  TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
  if (IsNeon)
    VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
                            : UnitSize == 8 ? &ARM::DPRRegClass
                                            : nullptr;

  unsigned BytesLeft = SizeVal % UnitSize;
  unsigned LoopSize = SizeVal - BytesLeft;

  if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
    // Small copy: fully unroll into post-increment load/store pairs.
    // Use LDR and STR to copy.
    // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
    // [destOut] = STR_POST(scratch, destIn, UnitSize)
    unsigned srcIn = src;
    unsigned destIn = dest;
    for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
      unsigned srcOut = MRI.createVirtualRegister(TRC);
      unsigned destOut = MRI.createVirtualRegister(TRC);
      unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
      emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }

    // Handle the leftover bytes with LDRB and STRB.
    // [scratch, srcOut] = LDRB_POST(srcIn, 1)
    // [destOut] = STRB_POST(scratch, destIn, 1)
    for (unsigned i = 0; i < BytesLeft; i++) {
      unsigned srcOut = MRI.createVirtualRegister(TRC);
      unsigned destOut = MRI.createVirtualRegister(TRC);
      unsigned scratch = MRI.createVirtualRegister(TRC);
      emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }
    MI.eraseFromParent();   // The instruction is gone now.
    return BB;
  }

  // Expand the pseudo op to a loop.
  // thisMBB:
  // ...
  //   movw varEnd, # --> with thumb2
  //   movt varEnd, #
  //   ldrcp varEnd, idx --> without thumb2
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate to varEnd.
  unsigned varEnd = MRI.createVirtualRegister(TRC);
  if (Subtarget->useMovt(*MF)) {
    // movw (+ movt when the high half is non-zero).
    unsigned Vtmp = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
    BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
        .addImm(LoopSize & 0xFFFF)
        .add(predOps(ARMCC::AL));

    if ((LoopSize & 0xFFFF0000) != 0)
      BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
          .addReg(Vtmp)
          .addImm(LoopSize >> 16)
          .add(predOps(ARMCC::AL));
  } else {
    // No movw/movt: load the loop count from the constant pool instead.
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
    if (Align == 0)
      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

    if (IsThumb)
      BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL));
    else
      BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL));
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  unsigned varLoop = MRI.createVirtualRegister(TRC);
  unsigned varPhi = MRI.createVirtualRegister(TRC);
  unsigned srcLoop = MRI.createVirtualRegister(TRC);
  unsigned srcPhi = MRI.createVirtualRegister(TRC);
  unsigned destLoop = MRI.createVirtualRegister(TRC);
  unsigned destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
      .addReg(varLoop).addMBB(loopMBB)
      .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
      .addReg(srcLoop).addMBB(loopMBB)
      .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
      .addReg(destLoop).addMBB(loopMBB)
      .addReg(dest).addMBB(entryBB);

  // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
        .add(t1CondCodeOp())
        .addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL));
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl,
                TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    MIB.addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    // Force the optional cc_out operand to define CPSR so the branch below
    // can test the subtraction's flags (i.e. emit SUBS).
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  auto StartOfExit = exitMBB->begin();

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    unsigned srcOut = MRI.createVirtualRegister(TRC);
    unsigned destOut = MRI.createVirtualRegister(TRC);
    unsigned scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI.eraseFromParent();   // The instruction is gone now.
  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4. This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it. Windows on ARM is a pure
  // thumb-2 environment, so there is no interworking required. As a result, we
  // do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.
9084 9085 switch (TM.getCodeModel()) { 9086 case CodeModel::Tiny: 9087 llvm_unreachable("Tiny code model not available on ARM."); 9088 case CodeModel::Small: 9089 case CodeModel::Medium: 9090 case CodeModel::Kernel: 9091 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) 9092 .add(predOps(ARMCC::AL)) 9093 .addExternalSymbol("__chkstk") 9094 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 9095 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 9096 .addReg(ARM::R12, 9097 RegState::Implicit | RegState::Define | RegState::Dead) 9098 .addReg(ARM::CPSR, 9099 RegState::Implicit | RegState::Define | RegState::Dead); 9100 break; 9101 case CodeModel::Large: { 9102 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 9103 unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 9104 9105 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) 9106 .addExternalSymbol("__chkstk"); 9107 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) 9108 .add(predOps(ARMCC::AL)) 9109 .addReg(Reg, RegState::Kill) 9110 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 9111 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 9112 .addReg(ARM::R12, 9113 RegState::Implicit | RegState::Define | RegState::Dead) 9114 .addReg(ARM::CPSR, 9115 RegState::Implicit | RegState::Define | RegState::Dead); 9116 break; 9117 } 9118 } 9119 9120 BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP) 9121 .addReg(ARM::SP, RegState::Kill) 9122 .addReg(ARM::R4, RegState::Kill) 9123 .setMIFlags(MachineInstr::FrameSetup) 9124 .add(predOps(ARMCC::AL)) 9125 .add(condCodeOp()); 9126 9127 MI.eraseFromParent(); 9128 return MBB; 9129 } 9130 9131 MachineBasicBlock * 9132 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, 9133 MachineBasicBlock *MBB) const { 9134 DebugLoc DL = MI.getDebugLoc(); 9135 MachineFunction *MF = MBB->getParent(); 9136 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 9137 9138 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); 9139 MF->insert(++MBB->getIterator(), 
ContBB); 9140 ContBB->splice(ContBB->begin(), MBB, 9141 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 9142 ContBB->transferSuccessorsAndUpdatePHIs(MBB); 9143 MBB->addSuccessor(ContBB); 9144 9145 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 9146 BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0)); 9147 MF->push_back(TrapBB); 9148 MBB->addSuccessor(TrapBB); 9149 9150 BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8)) 9151 .addReg(MI.getOperand(0).getReg()) 9152 .addImm(0) 9153 .add(predOps(ARMCC::AL)); 9154 BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc)) 9155 .addMBB(TrapBB) 9156 .addImm(ARMCC::EQ) 9157 .addReg(ARM::CPSR); 9158 9159 MI.eraseFromParent(); 9160 return ContBB; 9161 } 9162 9163 // The CPSR operand of SelectItr might be missing a kill marker 9164 // because there were multiple uses of CPSR, and ISel didn't know 9165 // which to mark. Figure out whether SelectItr should have had a 9166 // kill marker, and set it if it should. Returns the correct kill 9167 // marker value. 9168 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, 9169 MachineBasicBlock* BB, 9170 const TargetRegisterInfo* TRI) { 9171 // Scan forward through BB for a use/def of CPSR. 9172 MachineBasicBlock::iterator miI(std::next(SelectItr)); 9173 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { 9174 const MachineInstr& mi = *miI; 9175 if (mi.readsRegister(ARM::CPSR)) 9176 return false; 9177 if (mi.definesRegister(ARM::CPSR)) 9178 break; // Should have kill-flag - update below. 9179 } 9180 9181 // If we hit the end of the block, check whether CPSR is live into a 9182 // successor. 9183 if (miI == BB->end()) { 9184 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), 9185 sEnd = BB->succ_end(); 9186 sItr != sEnd; ++sItr) { 9187 MachineBasicBlock* succ = *sItr; 9188 if (succ->isLiveIn(ARM::CPSR)) 9189 return false; 9190 } 9191 } 9192 9193 // We found a def, or hit the end of the basic block and CPSR wasn't live 9194 // out. 
SelectMI should have a kill flag on CPSR. 9195 SelectItr->addRegisterKilled(ARM::CPSR, TRI); 9196 return true; 9197 } 9198 9199 MachineBasicBlock * 9200 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 9201 MachineBasicBlock *BB) const { 9202 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 9203 DebugLoc dl = MI.getDebugLoc(); 9204 bool isThumb2 = Subtarget->isThumb2(); 9205 switch (MI.getOpcode()) { 9206 default: { 9207 MI.print(errs()); 9208 llvm_unreachable("Unexpected instr type to insert"); 9209 } 9210 9211 // Thumb1 post-indexed loads are really just single-register LDMs. 9212 case ARM::tLDR_postidx: { 9213 MachineOperand Def(MI.getOperand(1)); 9214 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) 9215 .add(Def) // Rn_wb 9216 .add(MI.getOperand(2)) // Rn 9217 .add(MI.getOperand(3)) // PredImm 9218 .add(MI.getOperand(4)) // PredReg 9219 .add(MI.getOperand(0)); // Rt 9220 MI.eraseFromParent(); 9221 return BB; 9222 } 9223 9224 // The Thumb2 pre-indexed stores have the same MI operands, they just 9225 // define them differently in the .td files from the isel patterns, so 9226 // they need pseudos. 9227 case ARM::t2STR_preidx: 9228 MI.setDesc(TII->get(ARM::t2STR_PRE)); 9229 return BB; 9230 case ARM::t2STRB_preidx: 9231 MI.setDesc(TII->get(ARM::t2STRB_PRE)); 9232 return BB; 9233 case ARM::t2STRH_preidx: 9234 MI.setDesc(TII->get(ARM::t2STRH_PRE)); 9235 return BB; 9236 9237 case ARM::STRi_preidx: 9238 case ARM::STRBi_preidx: { 9239 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM 9240 : ARM::STRB_PRE_IMM; 9241 // Decode the offset. 
9242 unsigned Offset = MI.getOperand(4).getImm(); 9243 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 9244 Offset = ARM_AM::getAM2Offset(Offset); 9245 if (isSub) 9246 Offset = -Offset; 9247 9248 MachineMemOperand *MMO = *MI.memoperands_begin(); 9249 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 9250 .add(MI.getOperand(0)) // Rn_wb 9251 .add(MI.getOperand(1)) // Rt 9252 .add(MI.getOperand(2)) // Rn 9253 .addImm(Offset) // offset (skip GPR==zero_reg) 9254 .add(MI.getOperand(5)) // pred 9255 .add(MI.getOperand(6)) 9256 .addMemOperand(MMO); 9257 MI.eraseFromParent(); 9258 return BB; 9259 } 9260 case ARM::STRr_preidx: 9261 case ARM::STRBr_preidx: 9262 case ARM::STRH_preidx: { 9263 unsigned NewOpc; 9264 switch (MI.getOpcode()) { 9265 default: llvm_unreachable("unexpected opcode!"); 9266 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 9267 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 9268 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 9269 } 9270 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 9271 for (unsigned i = 0; i < MI.getNumOperands(); ++i) 9272 MIB.add(MI.getOperand(i)); 9273 MI.eraseFromParent(); 9274 return BB; 9275 } 9276 9277 case ARM::tMOVCCr_pseudo: { 9278 // To "insert" a SELECT_CC instruction, we actually have to insert the 9279 // diamond control-flow pattern. The incoming instruction knows the 9280 // destination vreg to set, the condition code register to branch on, the 9281 // true/false values to select between, and a branch opcode to use. 9282 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9283 MachineFunction::iterator It = ++BB->getIterator(); 9284 9285 // thisMBB: 9286 // ... 9287 // TrueVal = ... 
9288 // cmpTY ccX, r1, r2 9289 // bCC copy1MBB 9290 // fallthrough --> copy0MBB 9291 MachineBasicBlock *thisMBB = BB; 9292 MachineFunction *F = BB->getParent(); 9293 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 9294 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 9295 F->insert(It, copy0MBB); 9296 F->insert(It, sinkMBB); 9297 9298 // Check whether CPSR is live past the tMOVCCr_pseudo. 9299 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); 9300 if (!MI.killsRegister(ARM::CPSR) && 9301 !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) { 9302 copy0MBB->addLiveIn(ARM::CPSR); 9303 sinkMBB->addLiveIn(ARM::CPSR); 9304 } 9305 9306 // Transfer the remainder of BB and its successor edges to sinkMBB. 9307 sinkMBB->splice(sinkMBB->begin(), BB, 9308 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9309 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 9310 9311 BB->addSuccessor(copy0MBB); 9312 BB->addSuccessor(sinkMBB); 9313 9314 BuildMI(BB, dl, TII->get(ARM::tBcc)) 9315 .addMBB(sinkMBB) 9316 .addImm(MI.getOperand(3).getImm()) 9317 .addReg(MI.getOperand(4).getReg()); 9318 9319 // copy0MBB: 9320 // %FalseValue = ... 9321 // # fallthrough to sinkMBB 9322 BB = copy0MBB; 9323 9324 // Update machine-CFG edges 9325 BB->addSuccessor(sinkMBB); 9326 9327 // sinkMBB: 9328 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 9329 // ... 9330 BB = sinkMBB; 9331 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) 9332 .addReg(MI.getOperand(1).getReg()) 9333 .addMBB(copy0MBB) 9334 .addReg(MI.getOperand(2).getReg()) 9335 .addMBB(thisMBB); 9336 9337 MI.eraseFromParent(); // The pseudo instruction is gone now. 9338 return BB; 9339 } 9340 9341 case ARM::BCCi64: 9342 case ARM::BCCZi64: { 9343 // If there is an unconditional branch to the other successor, remove it. 
9344 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9345 9346 // Compare both parts that make up the double comparison separately for 9347 // equality. 9348 bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; 9349 9350 unsigned LHS1 = MI.getOperand(1).getReg(); 9351 unsigned LHS2 = MI.getOperand(2).getReg(); 9352 if (RHSisZero) { 9353 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 9354 .addReg(LHS1) 9355 .addImm(0) 9356 .add(predOps(ARMCC::AL)); 9357 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 9358 .addReg(LHS2).addImm(0) 9359 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 9360 } else { 9361 unsigned RHS1 = MI.getOperand(3).getReg(); 9362 unsigned RHS2 = MI.getOperand(4).getReg(); 9363 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 9364 .addReg(LHS1) 9365 .addReg(RHS1) 9366 .add(predOps(ARMCC::AL)); 9367 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 9368 .addReg(LHS2).addReg(RHS2) 9369 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 9370 } 9371 9372 MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB(); 9373 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 9374 if (MI.getOperand(0).getImm() == ARMCC::NE) 9375 std::swap(destMBB, exitMBB); 9376 9377 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 9378 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 9379 if (isThumb2) 9380 BuildMI(BB, dl, TII->get(ARM::t2B)) 9381 .addMBB(exitMBB) 9382 .add(predOps(ARMCC::AL)); 9383 else 9384 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 9385 9386 MI.eraseFromParent(); // The pseudo instruction is gone now. 
9387 return BB; 9388 } 9389 9390 case ARM::Int_eh_sjlj_setjmp: 9391 case ARM::Int_eh_sjlj_setjmp_nofp: 9392 case ARM::tInt_eh_sjlj_setjmp: 9393 case ARM::t2Int_eh_sjlj_setjmp: 9394 case ARM::t2Int_eh_sjlj_setjmp_nofp: 9395 return BB; 9396 9397 case ARM::Int_eh_sjlj_setup_dispatch: 9398 EmitSjLjDispatchBlock(MI, BB); 9399 return BB; 9400 9401 case ARM::ABS: 9402 case ARM::t2ABS: { 9403 // To insert an ABS instruction, we have to insert the 9404 // diamond control-flow pattern. The incoming instruction knows the 9405 // source vreg to test against 0, the destination vreg to set, 9406 // the condition code register to branch on, the 9407 // true/false values to select between, and a branch opcode to use. 9408 // It transforms 9409 // V1 = ABS V0 9410 // into 9411 // V2 = MOVS V0 9412 // BCC (branch to SinkBB if V0 >= 0) 9413 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 9414 // SinkBB: V1 = PHI(V2, V3) 9415 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9416 MachineFunction::iterator BBI = ++BB->getIterator(); 9417 MachineFunction *Fn = BB->getParent(); 9418 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 9419 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 9420 Fn->insert(BBI, RSBBB); 9421 Fn->insert(BBI, SinkBB); 9422 9423 unsigned int ABSSrcReg = MI.getOperand(1).getReg(); 9424 unsigned int ABSDstReg = MI.getOperand(0).getReg(); 9425 bool ABSSrcKIll = MI.getOperand(1).isKill(); 9426 bool isThumb2 = Subtarget->isThumb2(); 9427 MachineRegisterInfo &MRI = Fn->getRegInfo(); 9428 // In Thumb mode S must not be specified if source register is the SP or 9429 // PC and if destination register is the SP, so restrict register class 9430 unsigned NewRsbDstReg = 9431 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); 9432 9433 // Transfer the remainder of BB and its successor edges to sinkMBB. 
9434 SinkBB->splice(SinkBB->begin(), BB, 9435 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9436 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 9437 9438 BB->addSuccessor(RSBBB); 9439 BB->addSuccessor(SinkBB); 9440 9441 // fall through to SinkMBB 9442 RSBBB->addSuccessor(SinkBB); 9443 9444 // insert a cmp at the end of BB 9445 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 9446 .addReg(ABSSrcReg) 9447 .addImm(0) 9448 .add(predOps(ARMCC::AL)); 9449 9450 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 9451 BuildMI(BB, dl, 9452 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 9453 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 9454 9455 // insert rsbri in RSBBB 9456 // Note: BCC and rsbri will be converted into predicated rsbmi 9457 // by if-conversion pass 9458 BuildMI(*RSBBB, RSBBB->begin(), dl, 9459 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 9460 .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0) 9461 .addImm(0) 9462 .add(predOps(ARMCC::AL)) 9463 .add(condCodeOp()); 9464 9465 // insert PHI in SinkBB, 9466 // reuse ABSDstReg to not change uses of ABS instruction 9467 BuildMI(*SinkBB, SinkBB->begin(), dl, 9468 TII->get(ARM::PHI), ABSDstReg) 9469 .addReg(NewRsbDstReg).addMBB(RSBBB) 9470 .addReg(ABSSrcReg).addMBB(BB); 9471 9472 // remove ABS instruction 9473 MI.eraseFromParent(); 9474 9475 // return last added BB 9476 return SinkBB; 9477 } 9478 case ARM::COPY_STRUCT_BYVAL_I32: 9479 ++NumLoopByVals; 9480 return EmitStructByval(MI, BB); 9481 case ARM::WIN__CHKSTK: 9482 return EmitLowered__chkstk(MI, BB); 9483 case ARM::WIN__DBZCHK: 9484 return EmitLowered__dbzchk(MI, BB); 9485 } 9486 } 9487 9488 /// Attaches vregs to MEMCPY that it will use as scratch registers 9489 /// when it is expanded into LDM/STM. This is done as a post-isel lowering 9490 /// instead of as a custom inserter because we need the use list from the SDNode. 
/// Attach scratch virtual registers to a MEMCPY pseudo.
///
/// \p Node is the SDNode the MEMCPY was selected from; its use list tells us
/// whether the updated dst/src results are actually consumed.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr &MI, const SDNode *Node) {
  bool isThumb1 = Subtarget->isThumb1Only();

  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);

  // If the new dst/src is unused mark it as dead.
  if (!Node->hasAnyUseOfValue(0)) {
    MI.getOperand(0).setIsDead(true);
  }
  if (!Node->hasAnyUseOfValue(1)) {
    MI.getOperand(1).setIsDead(true);
  }

  // The MEMCPY both defines and kills the scratch registers.
  // Operand 4 is the immediate count of scratch registers the expansion
  // will need; append one fresh vreg (dead def) per scratch register.
  // Thumb1 LDM/STM can only use the low registers, hence tGPR there.
  for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
    unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
                                                         : &ARM::GPRRegClass);
    MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
  }
}

/// Post-isel hook: fix up the optional cc_out operand of flag-setting
/// instructions (and attach scratch regs to MEMCPY, which needs the SDNode's
/// use list and so cannot be done in a custom inserter).
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
  if (MI.getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI.getDesc();
  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
  // operand is still set to noreg. If needed, set the optional operand's
  // register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
  unsigned ccOutIdx;
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() ==
           MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
           && "converted opcode should be the same except for cc_out"
           " (and, on Thumb1, pred)");

    MI.setDesc(*MCID);

    // Add the optional cc_out operand
    MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));

    // On Thumb1, move all input operands to the end, then add the predicate
    if (Subtarget->isThumb1Only()) {
      // Rotate the explicit use operands to the tail: repeatedly copy
      // operand 1 to the end and erase the original, once per use operand
      // (total operands minus def, cc_out, and the two predicate slots).
      for (unsigned c = MCID->getNumOperands() - 4; c--;) {
        MI.addOperand(MI.getOperand(1));
        MI.RemoveOperand(1);
      }

      // Restore the ties (re-copying operands above dropped any TIED_TO
      // constraints; rebuild them from the new opcode's descriptor).
      for (unsigned i = MI.getNumOperands(); i--;) {
        const MachineOperand& op = MI.getOperand(i);
        if (op.isReg() && op.isUse()) {
          int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
          if (DefIdx != -1)
            MI.tieOperands(DefIdx, i);
        }
      }

      // Append the (always-AL) predicate operands.
      MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
      MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
      ccOutIdx = 1;
    } else
      ccOutIdx = MCID->getNumOperands() - 1;
  } else
    ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI.RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI.getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    // Thumb1 instructions must have the S bit even if the CPSR is dead.
    if (!Subtarget->isThumb1Only())
      return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI.getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
// ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)       [AllOnes=0]
//   (select cc y, 0)       [AllOnes=0]
//   (zext cc)              [AllOnes=0]
//   (sext cc)              [AllOnes=0/1]
//   (select cc -1, y)      [AllOnes=1]
//   (select cc y, -1)      [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
9634 // OtherOp is set to the alternative value of N. 9635 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 9636 SDValue &CC, bool &Invert, 9637 SDValue &OtherOp, 9638 SelectionDAG &DAG) { 9639 switch (N->getOpcode()) { 9640 default: return false; 9641 case ISD::SELECT: { 9642 CC = N->getOperand(0); 9643 SDValue N1 = N->getOperand(1); 9644 SDValue N2 = N->getOperand(2); 9645 if (isZeroOrAllOnes(N1, AllOnes)) { 9646 Invert = false; 9647 OtherOp = N2; 9648 return true; 9649 } 9650 if (isZeroOrAllOnes(N2, AllOnes)) { 9651 Invert = true; 9652 OtherOp = N1; 9653 return true; 9654 } 9655 return false; 9656 } 9657 case ISD::ZERO_EXTEND: 9658 // (zext cc) can never be the all ones value. 9659 if (AllOnes) 9660 return false; 9661 LLVM_FALLTHROUGH; 9662 case ISD::SIGN_EXTEND: { 9663 SDLoc dl(N); 9664 EVT VT = N->getValueType(0); 9665 CC = N->getOperand(0); 9666 if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC) 9667 return false; 9668 Invert = !AllOnes; 9669 if (AllOnes) 9670 // When looking for an AllOnes constant, N is an sext, and the 'other' 9671 // value is 0. 9672 OtherOp = DAG.getConstant(0, dl, VT); 9673 else if (N->getOpcode() == ISD::ZERO_EXTEND) 9674 // When looking for a 0 constant, N can be zext or sext. 9675 OtherOp = DAG.getConstant(1, dl, VT); 9676 else 9677 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, 9678 VT); 9679 return true; 9680 } 9681 } 9682 } 9683 9684 // Combine a constant select operand into its use: 9685 // 9686 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 9687 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 9688 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 9689 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 9690 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 9691 // 9692 // The transform is rejected if the select doesn't have a constant operand that 9693 // is null, or all ones when AllOnes is set. 
9694 // 9695 // Also recognize sext/zext from i1: 9696 // 9697 // (add (zext cc), x) -> (select cc (add x, 1), x) 9698 // (add (sext cc), x) -> (select cc (add x, -1), x) 9699 // 9700 // These transformations eventually create predicated instructions. 9701 // 9702 // @param N The node to transform. 9703 // @param Slct The N operand that is a select. 9704 // @param OtherOp The other N operand (x above). 9705 // @param DCI Context. 9706 // @param AllOnes Require the select constant to be all ones instead of null. 9707 // @returns The new node, or SDValue() on failure. 9708 static 9709 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 9710 TargetLowering::DAGCombinerInfo &DCI, 9711 bool AllOnes = false) { 9712 SelectionDAG &DAG = DCI.DAG; 9713 EVT VT = N->getValueType(0); 9714 SDValue NonConstantVal; 9715 SDValue CCOp; 9716 bool SwapSelectOps; 9717 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 9718 NonConstantVal, DAG)) 9719 return SDValue(); 9720 9721 // Slct is now know to be the desired identity constant when CC is true. 9722 SDValue TrueVal = OtherOp; 9723 SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 9724 OtherOp, NonConstantVal); 9725 // Unless SwapSelectOps says CC should be false. 9726 if (SwapSelectOps) 9727 std::swap(TrueVal, FalseVal); 9728 9729 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, 9730 CCOp, TrueVal, FalseVal); 9731 } 9732 9733 // Attempt combineSelectAndUse on each operand of a commutative operator N. 
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Try each operand as the select; hasOneUse avoids duplicating a select
  // that has other users.
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
      return Result;
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
      return Result;
  return SDValue();
}

// Return true if N produces the two halves of an unzip (i.e. something
// ADD(VUZP.0, VUZP.1)-style combines below can consume).
static bool IsVUZPShuffleNode(SDNode *N) {
  // VUZP shuffle node.
  if (N->getOpcode() == ARMISD::VUZP)
    return true;

  // "VUZP" on i32 is an alias for VTRN.
  if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
    return true;

  return false;
}

// Fold ADD(VUZP.0, VUZP.1) into a NEON vpadd intrinsic on the unzip's
// original inputs. Returns SDValue() when the pattern doesn't match.
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Look for ADD(VUZP.0, VUZP.1).
  // Both operands must come from the same VUZP node, but must be different
  // result values of it (N0 == N1 would be VUZP.k + VUZP.k).
  if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
      N0 == N1)
    return SDValue();

  // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
  if (!N->getValueType(0).is64BitVector())
    return SDValue();

  // Generate vpadd.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDNode *Unzip = N0.getNode();
  EVT VT = N->getValueType(0);

  // Build an INTRINSIC_WO_CHAIN: intrinsic id followed by the two vectors
  // that were being unzipped.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  Ops.push_back(Unzip->getOperand(0));
  Ops.push_back(Unzip->getOperand(1));

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}

// Fold ADD(SEXT(VUZP.0), SEXT(VUZP.1)) (or the ZEXT form) into a NEON
// vpaddl.sN / vpaddl.uN intrinsic on the concatenated unzip inputs.
// Returns SDValue() when the pattern doesn't match.
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // Check for two extended operands.
  // Both must be the same kind of extension (both sext or both zext).
  if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
        N1.getOpcode() == ISD::SIGN_EXTEND) &&
      !(N0.getOpcode() == ISD::ZERO_EXTEND &&
        N1.getOpcode() == ISD::ZERO_EXTEND))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);

  // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
  // As in AddCombineToVPADD: same unzip node, distinct result values.
  if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
      N00 == N10)
    return SDValue();

  // We only recognize Q register paddl here; this can't be reached until
  // after type legalization.
  if (!N00.getValueType().is64BitVector() ||
      !N0.getValueType().is128BitVector())
    return SDValue();

  // Generate vpaddl.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
  unsigned Opcode;
  if (N0.getOpcode() == ISD::SIGN_EXTEND)
    Opcode = Intrinsic::arm_neon_vpaddls;
  else
    Opcode = Intrinsic::arm_neon_vpaddlu;
  Ops.push_back(DAG.getConstant(Opcode, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  // vpaddl takes the pre-unzip data as one Q register: concatenate the two
  // D-sized inputs of the unzip back together.
  EVT ElemTy = N00.getValueType().getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
                               N00.getOperand(0), N00.getOperand(1));
  Ops.push_back(Concat);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}

// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
// much easier to match.
// Try to turn ADD(BUILD_VECTOR, BUILD_VECTOR) — where the two BUILD_VECTORs
// extract all the even resp. odd lanes of one source vector — into a
// vpaddl.s intrinsic (pairwise add long).
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Only perform optimization if after legalize, and if NEON is available. We
  // also expect both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
  // operands, where N is the size of the formed vector.
  // Each EXTRACT_VECTOR should have the same input vector and odd or even
  // index such that we have a pair wise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operands to the ADD which are BUILD_VECTORs,
  // check to see if each of their operands are an EXTRACT_VECTOR with
  // the same vector and appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // First operand is the vector, verify its the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // Second is the constant, verify its correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the constant, we want to see all the even or all the odd: lane i
      // of N0 must come from index 2*i and lane i of N1 from index 2*i+1.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment index.
      nextIndex+=2;
    } else
      return SDValue();
  }

  // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
  // we're using the entire input vector, otherwise there's a size/legality
  // mismatch somewhere.
  if (nextIndex != Vec.getValueType().getVectorNumElements() ||
      Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
    return SDValue();

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc dl(N);

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  // vpaddl produces double-width lanes; extend or truncate back to the
  // requested result type.
  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, dl, VT, tmp);
}

// Return V itself if it is a [US]MUL_LOHI node, otherwise a null SDValue.
static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}

// Try to fold an ADDC/ADDE pair that accumulates the product of two 16-bit
// values into one of the 64-bit-accumulating SMLALBB/BT/TB/TT nodes.
// Requires DSP on Thumb targets, or ARMv5TE+ otherwise.
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb()) {
    if (!Subtarget->hasDSP())
      return SDValue();
  } else if (!Subtarget->hasV5TEOps())
    return SDValue();

  // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
  // accumulates the product into a 64-bit value.
  // The 16-bit values will
  // be sign extended somehow or SRA'd into 32-bit values
  // (addc (adde (mul 16bit, 16bit), lo), hi)
  //
  // Locate the MUL on the ADDC; it may be either operand.
  SDValue Mul = AddcNode->getOperand(0);
  SDValue Lo = AddcNode->getOperand(1);
  if (Mul.getOpcode() != ISD::MUL) {
    Lo = AddcNode->getOperand(0);
    Mul = AddcNode->getOperand(1);
    if (Mul.getOpcode() != ISD::MUL)
      return SDValue();
  }

  // Locate the SRA on the ADDE; it may be either operand.
  SDValue SRA = AddeNode->getOperand(0);
  SDValue Hi = AddeNode->getOperand(1);
  if (SRA.getOpcode() != ISD::SRA) {
    SRA = AddeNode->getOperand(1);
    Hi = AddeNode->getOperand(0);
    if (SRA.getOpcode() != ISD::SRA)
      return SDValue();
  }
  // The high half of the product must be the sign bits of the 32-bit MUL,
  // i.e. an arithmetic shift right by exactly 31.
  if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
    if (Const->getZExtValue() != 31)
      return SDValue();
  } else
    return SDValue();

  if (SRA.getOperand(0) != Mul)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(AddcNode);
  unsigned Opcode = 0;
  SDValue Op0;
  SDValue Op1;

  // Pick the B/T variant based on whether each multiplicand is a sign-
  // extended low half (isS16) or the top half moved down by an SRA-by-16
  // (isSRA16); for the T forms, look through the SRA to the 32-bit source.
  if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALBB;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALBT;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1).getOperand(0);
  } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALTB;
    Op0 = Mul.getOperand(0).getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALTT;
    Op0 = Mul->getOperand(0).getOperand(0);
    Op1 = Mul->getOperand(1).getOperand(0);
  }

  if (!Op0 || !Op1)
    return SDValue();

  SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              Op0, Op1, Lo, Hi);
  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(SMLAL.getNode(), 1);
  SDValue LoMLALResult(SMLAL.getNode(), 0);

  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  // Return original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}

static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Look for multiply add opportunities.
  // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
  // each add nodes consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //      UMUL_LOHI
  //      / :lo    \ :hi
  //     V          \          [no multiline comment]
  //    loAdd -> ADDC         |
  //                 \ :carry /
  //                  V      V
  //                    ADDE <- hiAdd
  //
  // In the special case where only the higher part of a signed result is used
  // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
  // a constant with the exact value of 0x80000000, we recognize we are dealing
  // with a "rounded multiply and add" (or subtract) and transform it into
  // either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively.

  assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
          AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
         "Expect an ADDE or SUBE");

  assert(AddeSubeNode->getNumOperands() == 3 &&
         AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
         "ADDE node has the wrong inputs");

  // Check that we are chained to the right ADDC or SUBC node.
  // An ADDE must be glued to an ADDC and a SUBE to a SUBC.
  SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
  if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
       AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
      (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
       AddcSubcNode->getOpcode() != ARMISD::SUBC))
    return SDValue();

  SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
  SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
    return SDValue();

  assert(AddcSubcNode->getNumValues() == 2 &&
         AddcSubcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // maybe a SMLAL which multiplies two 16-bit values.
  if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
      AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
    return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);

  // Check for the triangle shape.
  SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
  SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);

  // Make sure that the ADDE/SUBE operands are not coming from the same node.
  if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeSubeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue *HiAddSub = nullptr;
  SDValue *LoMul = nullptr;
  SDValue *LowAddSub = nullptr;

  // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
  if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
    return SDValue();

  // The non-MUL operand of the ADDE/SUBE is the high accumulator input.
  if (IsLeftOperandMUL)
    HiAddSub = &AddeSubeOp1;
  else
    HiAddSub = &AddeSubeOp0;

  // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
  // whose low result is fed to the ADDC/SUBC we are checking.

  if (AddcSubcOp0 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp0;
    LowAddSub = &AddcSubcOp1;
  }
  if (AddcSubcOp1 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp1;
    LowAddSub = &AddcSubcOp0;
  }

  if (!LoMul)
    return SDValue();

  // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
  // the replacement below will create a cycle.
  if (AddcSubcNode == HiAddSub->getNode() ||
      AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Start building operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));

  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL, furthermore the LowAddSub must be a constant
  // addition or subtraction with the value of 0x80000000 (the rounding bias).
  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
      FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
      LowAddSub->getNode()->getOpcode() == ISD::Constant &&
      static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
          0x80000000) {
    Ops.push_back(*HiAddSub);
    if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
      FinalOpc = ARMISD::SMMLSR;
    } else {
      FinalOpc = ARMISD::SMMLAR;
    }
    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);

    return SDValue(AddeSubeNode, 0);
  } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
    // SMMLS is generated during instruction selection and the rest of this
    // function can not handle the case where AddcSubcNode is a SUBC.
    return SDValue();

  // Finish building the operand list for {U/S}MLAL
  Ops.push_back(*LowAddSub);
  Ops.push_back(*HiAddSub);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  return SDValue(AddeSubeNode, 0);
}

static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // UMAAL is similar to UMLAL except that it adds two unsigned values.
  // While trying to combine for the other MLAL nodes, first search for the
  // chance to use UMAAL.
  // Check if Addc uses a node which has already
  // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
  // as the addend, and it's handled in PerformUMLALCombine.

  // UMAAL requires ARMv6+ with DSP; otherwise fall back to the MLAL combine.
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);

  // Check that we have a glued ADDC node.
  SDNode* AddcNode = AddeNode->getOperand(2).getNode();
  if (AddcNode->getOpcode() != ARMISD::ADDC)
    return SDValue();

  // Find the converted UMAAL or quit if it doesn't exist.
  SDNode *UmlalNode = nullptr;
  SDValue AddHi;
  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(0).getNode();
    AddHi = AddcNode->getOperand(1);
  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(1).getNode();
    AddHi = AddcNode->getOperand(0);
  } else {
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
  }

  // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
  // the ADDC as well as Zero.
  if (!isNullConstant(UmlalNode->getOperand(3)))
    return SDValue();

  if ((isNullConstant(AddeNode->getOperand(0)) &&
       AddeNode->getOperand(1).getNode() == UmlalNode) ||
      (AddeNode->getOperand(0).getNode() == UmlalNode &&
       isNullConstant(AddeNode->getOperand(1)))) {
    SelectionDAG &DAG = DCI.DAG;
    // UMAAL(a, b, AccLo, AccHi): reuse the UMLAL's multiplicands and low
    // accumulator, and feed the ADDC's extra addend in as the high one.
    SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
                      UmlalNode->getOperand(2), AddHi };
    SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
                                DAG.getVTList(MVT::i32, MVT::i32), Ops);

    // Replace the ADDs' nodes uses by the UMAAL node's values.
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));

    // Return original node to notify the driver to stop replacing.
    return SDValue(AddeNode, 0);
  }
  return SDValue();
}

// Fold UMLAL whose accumulator inputs come from a zero ADDC/ADDE pair into a
// single UMAAL node.
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return SDValue();

  // Check that we have a pair of ADDC and ADDE as operands.
  // Both addends of the ADDE must be zero.
  SDNode* AddcNode = N->getOperand(2).getNode();
  SDNode* AddeNode = N->getOperand(3).getNode();
  if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
      (AddeNode->getOpcode() == ARMISD::ADDE) &&
      isNullConstant(AddeNode->getOperand(0)) &&
      isNullConstant(AddeNode->getOperand(1)) &&
      (AddeNode->getOperand(2).getNode() == AddcNode))
    return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
                       DAG.getVTList(MVT::i32, MVT::i32),
                       {N->getOperand(0), N->getOperand(1),
                        AddcNode->getOperand(0), AddcNode->getOperand(1)});
  else
    return SDValue();
}

static SDValue PerformAddcSubcCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG(DCI.DAG);

  if (N->getOpcode() == ARMISD::SUBC) {
    // (SUBC (ADDE 0, 0, C), 1) -> C
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    if (LHS->getOpcode() == ARMISD::ADDE &&
        isNullConstant(LHS->getOperand(0)) &&
        isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
      return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
    }
  }

  // On Thumb1, rewrite ADDC/SUBC with a negative immediate RHS as the
  // opposite operation on the negated (positive) immediate.
  if (Subtarget->isThumb1Only()) {
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int32_t imm =
      C->getSExtValue();
      // Exclude INT_MIN: -imm would overflow and not be representable.
      if (imm < 0 && imm > std::numeric_limits<int>::min()) {
        SDLoc DL(N);
        RHS = DAG.getConstant(-imm, DL, MVT::i32);
        unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
                                                           : ARMISD::ADDC;
        return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
      }
    }
  }

  return SDValue();
}

static SDValue PerformAddeSubeCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb1Only()) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int64_t imm = C->getSExtValue();
      if (imm < 0) {
        SDLoc DL(N);

        // The with-carry-in form matches bitwise not instead of the negation.
        // Effectively, the inverse interpretation of the carry flag already
        // accounts for part of the negation.
        RHS = DAG.getConstant(~imm, DL, MVT::i32);

        unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
                                                           : ARMISD::ADDE;
        return DAG.getNode(Opcode, DL, N->getVTList(),
                           N->getOperand(0), RHS, N->getOperand(2));
      }
    }
  } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
    return AddCombineTo64bitMLAL(N, DCI, Subtarget);
  }
  return SDValue();
}

/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
static SDValue PerformADDECombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  // Only ARM and Thumb2 support UMLAL/SMLAL.
  if (Subtarget->isThumb1Only())
    return PerformAddeSubeCombine(N, DCI, Subtarget);

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const ARMSubtarget *Subtarget){
  // Attempt to create vpadd for this add.
  if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
    return Result;

  // Attempt to create vpaddl for this add.
  if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
    return Result;
  if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
                                                      Subtarget))
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
      return Result;
  return SDValue();
}

bool
ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
                                                 CombineLevel Level) const {
  if (Level == BeforeLegalizeTypes)
    return true;

  if (Subtarget->isThumb() && Subtarget->isThumb1Only())
    return true;

  if (N->getOpcode() != ISD::SHL)
    return true;

  // Turn off commute-with-shift transform after legalization, so it doesn't
  // conflict with PerformSHLSimplify. (We could try to detect when
  // PerformSHLSimplify would trigger more precisely, but it isn't
  // really necessary.)
  return false;
}

// Undo the generic (shl (add/or/xor/and x, c1), c2) -> (op (shl x, c2),
// c1 << c2) fold when it would force a mov-immediate: re-fold the shift
// back into the user when both constants fit ARM's rotated 8-bit encoding
// and every user can absorb a shifted operand.
static SDValue PerformSHLSimplify(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *ST) {
  // Allow the generic combiner to identify potential bswaps.
  if (DCI.isBeforeLegalize())
    return SDValue();

  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2)  -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // the operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - if c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform a shl

  // No shifted operands for 16-bit instructions.
  if (ST->isThumb() && ST->isThumb1Only())
    return SDValue();

  // Check that all the users could perform the shl themselves.
  for (auto U : N->uses()) {
    switch(U->getOpcode()) {
    default:
      return SDValue();
    case ISD::SUB:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SETCC:
    case ARMISD::CMP:
      // Check that the user isn't already using a constant because there
      // aren't any instructions that support an immediate operand and a
      // shifted operand.
      if (isa<ConstantSDNode>(U->getOperand(0)) ||
          isa<ConstantSDNode>(U->getOperand(1)))
        return SDValue();

      // Check that it's not already using a shift.
      if (U->getOperand(0).getOpcode() == ISD::SHL ||
          U->getOperand(1).getOpcode() == ISD::SHL)
        return SDValue();
      break;
    }
  }

  if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
      N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::SHL)
    return SDValue();

  SDValue SHL = N->getOperand(0);

  auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
  if (!C1ShlC2 || !C2)
    return SDValue();

  APInt C2Int = C2->getAPIntValue();
  APInt C1Int = C1ShlC2->getAPIntValue();

  // Check that performing a lshr will not lose any information.
  APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
                                     C2Int.getBitWidth() - C2->getZExtValue());
  if ((C1Int & Mask) != C1Int)
    return SDValue();

  // Shift the first constant.
  C1Int.lshrInPlace(C2Int);

  // The immediates are encoded as an 8-bit value that can be rotated.
  auto LargeImm = [](const APInt &Imm) {
    unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
    return Imm.getBitWidth() - Zeros > 8;
  };

  if (LargeImm(C1Int) || LargeImm(C2Int))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue X = SHL.getOperand(0);
  SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
                              DAG.getConstant(C1Int, dl, MVT::i32));
  // Shift left to compensate for the lshr of C1Int.
  SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));

  LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
             SHL.dump(); N->dump());
  LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
  return Res;
}


/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Only works one way, because it needs an immediate operand.
  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  // First try with the default operand order.
  if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
///
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
      return Result;

  return SDValue();
}

/// PerformVMULCombine
/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
/// special multiplier accumulator forwarding.
/// vmul d3, d0, d2
/// vmla d3, d1, d2
/// is faster than
/// vadd d3, d0, d1
/// vmul d3, d3, d2
//  However, for (A + B) * (A + B),
//    vadd d2, d0, d1
//    vmul d3, d0, d2
//    vmla d3, d1, d2
//  is slower than
//    vadd d2, d0, d1
//    vmul d3, d2, d2
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Find the add/sub operand of the multiply; swap so it ends up in N0.
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  // Don't distribute (A + B) * (A + B); see the comment above.
  if (N0 == N1)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}

// Replace (mul x, C) for i32 with shift/add/sub sequences when, after
// stripping trailing zeros, |C| is 2^N +/- 1. Vector multiplies are handed
// to PerformVMULCombine instead.
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  // Factor out any power-of-two component of the constant; it is re-applied
  // as a final shift at the end.
  int64_t MulAmt = C->getSExtValue();
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1), DL,
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
                                                    MVT::i32)));
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, DL, MVT::i32), Res);
    } else
      return SDValue();
  }

  // Re-apply the power-of-two factor that was stripped off above.
  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}

// Try to rewrite (and (shl/srl x, c2), c1), where c1 is a (shifted) mask,
// as a pair of shifts so the mask constant c1 need not be materialized.
static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Allow DAGCombine to pattern-match before we touch the canonical form.
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!N1C)
    return SDValue();

  uint32_t C1 = (uint32_t)N1C->getZExtValue();
  // Don't transform uxtb/uxth.
  if (C1 == 255 || C1 == 65535)
    return SDValue();

  SDNode *N0 = N->getOperand(0).getNode();
  if (!N0->hasOneUse())
    return SDValue();

  if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
    return SDValue();

  bool LeftShift = N0->getOpcode() == ISD::SHL;

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!N01C)
    return SDValue();

  uint32_t C2 = (uint32_t)N01C->getZExtValue();
  if (!C2 || C2 >= 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // We have a pattern of the form "(and (shl x, c2) c1)" or
  // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
  // transform to a pair of shifts, to save materializing c1.

  // First pattern: right shift, and c1+1 is a power of two.
  // FIXME: Also check reversed pattern (left shift, and ~c1+1 is a power
  // of two).
  // FIXME: Use demanded bits?
  if (!LeftShift && isMask_32(C1)) {
    uint32_t C3 = countLeadingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern: left shift, and (c1>>c2)+1 is a power of two.
  // FIXME: Also check reversed pattern (right shift, and ~(c1<<c2)+1
  // is a power of two).
  // FIXME: Use demanded bits?
  if (LeftShift && isShiftedMask_32(C1)) {
    uint32_t C3 = countLeadingZeros(C1);
    if (C2 + C3 < 32 && C1 == ((-1U << (C2 + C3)) >> C3)) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                        DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // FIXME: Transform "(and (shl x, c2) c1)" ->
  //        "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
  //        c1.
  return SDValue();
}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      // AND with a splat constant is equivalent to VBIC with the inverted
      // constant, if that inverse fits a NEON modified-immediate encoding.
      EVT VbicVT;
      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VbicVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
10772 } 10773 10774 if (Subtarget->isThumb1Only()) 10775 if (SDValue Result = CombineANDShift(N, DCI, Subtarget)) 10776 return Result; 10777 10778 return SDValue(); 10779 } 10780 10781 // Try combining OR nodes to SMULWB, SMULWT. 10782 static SDValue PerformORCombineToSMULWBT(SDNode *OR, 10783 TargetLowering::DAGCombinerInfo &DCI, 10784 const ARMSubtarget *Subtarget) { 10785 if (!Subtarget->hasV6Ops() || 10786 (Subtarget->isThumb() && 10787 (!Subtarget->hasThumb2() || !Subtarget->hasDSP()))) 10788 return SDValue(); 10789 10790 SDValue SRL = OR->getOperand(0); 10791 SDValue SHL = OR->getOperand(1); 10792 10793 if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) { 10794 SRL = OR->getOperand(1); 10795 SHL = OR->getOperand(0); 10796 } 10797 if (!isSRL16(SRL) || !isSHL16(SHL)) 10798 return SDValue(); 10799 10800 // The first operands to the shifts need to be the two results from the 10801 // same smul_lohi node. 10802 if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) || 10803 SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI) 10804 return SDValue(); 10805 10806 SDNode *SMULLOHI = SRL.getOperand(0).getNode(); 10807 if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) || 10808 SHL.getOperand(0) != SDValue(SMULLOHI, 1)) 10809 return SDValue(); 10810 10811 // Now we have: 10812 // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))) 10813 // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit arguments. 10814 // For SMUWB the 16-bit value will signed extended somehow. 10815 // For SMULWT only the SRA is required. 
  // Check both sides of SMUL_LOHI
  SDValue OpS16 = SMULLOHI->getOperand(0);
  SDValue OpS32 = SMULLOHI->getOperand(1);

  SelectionDAG &DAG = DCI.DAG;
  // If the first operand is not the 16-bit (or SRA'd) value, try the
  // operands the other way around.
  if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
    OpS16 = OpS32;
    OpS32 = SMULLOHI->getOperand(0);
  }

  SDLoc dl(OR);
  unsigned Opcode = 0;
  if (isS16(OpS16, DAG))
    Opcode = ARMISD::SMULWB;
  else if (isSRA16(OpS16)) {
    // Top half: drop the SRA and let SMULWT pick the high 16 bits.
    Opcode = ARMISD::SMULWT;
    OpS16 = OpS16->getOperand(0);
  }
  else
    return SDValue();

  SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
  DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
  return SDValue(OR, 0);
}

/// Try to turn "or (and A, mask), ..." patterns into an ARMISD::BFI
/// (bitfield insert). Called from PerformORCombine when the first OR
/// operand is a single-use AND.
static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //          (i.e., copy a bitfield value into another bitfield of the same
  //          width)

  if (VT != MVT::i32)
    return SDValue();

  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      // Shift the value down to the bitfield's LSB position.
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, DL, MVT::i32),
                        DAG.getConstant(Mask, DL, MVT::i32));

      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
    // as is to match.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               (~Mask == Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = countTrailingZeros(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
                        DAG.getConstant(lsb, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, DL, MVT::i32));

    DCI.CombineTo(N, Res, false);
    // Return value from the original node to inform the combiner that N is
    // now dead.
    return SDValue(N, 0);
  }

  return SDValue();
}

/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && Subtarget->hasNEON() &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VorrVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VorrVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
    if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
      return Result;
  }

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {

    // The code below optimizes (or (and X, Y), Z).
    // The AND operand needs to have a single user to make these optimizations
    // profitable.
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
      return SDValue();

    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0, SplatBits1;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ands are constants
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) && !HasAnyUndefs) {
        // Ensure that the bit width of the constants are the same and that
        // the splat arguments are logical inverses as per the pattern we
        // are trying to simplify.
        if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
            SplatBits0 == ~SplatBits1) {
          // Canonicalize the vector type to make instruction selection
          // simpler.
          EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
          SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
                                       N0->getOperand(1),
                                       N0->getOperand(0),
                                       N1->getOperand(0));
          return DAG.getNode(ISD::BITCAST, dl, VT, Result);
        }
      }
    }
  }

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.
  if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
    if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
      return Res;
  }

  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  return SDValue();
}

/// PerformXORCombine - Target-specific dag combine xforms for ISD::XOR.
static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  return SDValue();
}

// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn)
// and return it, and fill in FromMask and ToMask with (consecutive) bits
// in "from" to be extracted and their position in "to" (Rd).
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
  assert(N->getOpcode() == ARMISD::BFI);

  SDValue From = N->getOperand(1);
  // Operand 2 carries the inverted write mask; invert it back to get the
  // bits actually written into "to".
  ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
  FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());

  // If the Base came from a SHR #C, we can deduce that it is really testing bit
  // #C in the base of the SHR.
  if (From->getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(From->getOperand(1))) {
    APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
    FromMask <<= Shift.getLimitedValue(31);
    From = From->getOperand(0);
  }

  return From;
}

// If A and B contain one contiguous set of bits, does A | B == A . B?
//
// Neither A nor B must be zero.
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
  unsigned LastActiveBitInA = A.countTrailingZeros();
  unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
  return LastActiveBitInA - 1 == FirstActiveBitInB;
}

// Given a BFI node N, walk its chain of BFI input nodes and return a BFI
// whose written bits concatenate with N's (and come from the same base
// value), or SDValue() if no such BFI exists.
static SDValue FindBFIToCombineWith(SDNode *N) {
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can
  // combine with, if one exists.
  APInt ToMask, FromMask;
  SDValue From = ParseBFI(N, ToMask, FromMask);
  SDValue To = N->getOperand(0);

  // Now check for a compatible BFI to merge with. We can pass through BFIs that
  // aren't compatible, but not if they set the same bit in their destination as
  // we do (or that of any BFI we're going to combine with).
  SDValue V = To;
  APInt CombinedToMask = ToMask;
  while (V.getOpcode() == ARMISD::BFI) {
    APInt NewToMask, NewFromMask;
    SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
    if (NewFrom != From) {
      // This BFI has a different base. Keep going.
      CombinedToMask |= NewToMask;
      V = V.getOperand(0);
      continue;
    }

    // Do the written bits conflict with any we've seen so far?
    if ((NewToMask & CombinedToMask).getBoolValue())
      // Conflicting bits - bail out because going further is unsafe.
      return SDValue();

    // Are the new bits contiguous when combined with the old bits?
    if (BitsProperlyConcatenate(ToMask, NewToMask) &&
        BitsProperlyConcatenate(FromMask, NewFromMask))
      return V;
    if (BitsProperlyConcatenate(NewToMask, ToMask) &&
        BitsProperlyConcatenate(NewFromMask, FromMask))
      return V;

    // We've seen a write to some bits, so track it.
    CombinedToMask |= NewToMask;
    // Keep going...
    V = V.getOperand(0);
  }

  return SDValue();
}

/// Target-specific dag combine xforms for ARMISD::BFI.
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
    // the bits being cleared by the AND are not demanded by the BFI.
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    unsigned Mask = (1u << Width) - 1;
    unsigned Mask2 = N11C->getZExtValue();
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
    // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
    // Keep track of any consecutive bits set that all come from the same base
    // value. We can combine these together into a single BFI.
    SDValue CombineBFI = FindBFIToCombineWith(N);
    if (CombineBFI == SDValue())
      return SDValue();

    // We've found a BFI.
    APInt ToMask1, FromMask1;
    SDValue From1 = ParseBFI(N, ToMask1, FromMask1);

    APInt ToMask2, FromMask2;
    SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
    assert(From1 == From2);
    (void)From2;

    // First, unlink CombineBFI.
    DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
    // Then create a new BFI, combining the two together.
    APInt NewFromMask = FromMask1 | FromMask2;
    APInt NewToMask = ToMask1 | ToMask2;

    EVT VT = N->getValueType(0);
    SDLoc dl(N);

    // If the merged source bits do not start at bit 0, shift them down
    // into position first.
    if (NewFromMask[0] == 0)
      From1 = DCI.DAG.getNode(
        ISD::SRL, dl, VT, From1,
        DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
    return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
                           DCI.DAG.getConstant(~NewToMask, dl, VT));
  }
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP())
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));

  // vmovrrd(load f64) -> (load i32), (load i32)
  SDNode *InNode = InDouble.getNode();
  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
      InNode->getValueType(0) == MVT::f64 &&
      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
      !cast<LoadSDNode>(InNode)->isVolatile()) {
    // TODO: Should this be done for non-FrameIndex operands?
    LoadSDNode *LD = cast<LoadSDNode>(InNode);

    SelectionDAG &DAG = DCI.DAG;
    SDLoc DL(LD);
    SDValue BasePtr = LD->getBasePtr();
    // First word: same address as the original f64 load.
    SDValue NewLD1 =
        DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
                    LD->getAlignment(), LD->getMemOperand()->getFlags());

    // Second word: four bytes further on, chained after the first load.
    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    SDValue NewLD2 = DAG.getLoad(
        MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
        std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());

    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
    // On big-endian targets the high word comes first.
    if (DCI.DAG.getDataLayout().isBigEndian())
      std::swap (NewLD1, NewLD2);
    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
    return Result;
  }

  return SDValue();
}

/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BITCAST)
    Op1 = Op1.getOperand(0);
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BITCAST, SDLoc(N),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}

/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
/// are normal, non-volatile loads. If so, it is profitable to bitcast an
/// i64 vector to have f64 elements, since the value can then be loaded
/// directly into a VFP register.
11283 static bool hasNormalLoadOperand(SDNode *N) { 11284 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 11285 for (unsigned i = 0; i < NumElts; ++i) { 11286 SDNode *Elt = N->getOperand(i).getNode(); 11287 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 11288 return true; 11289 } 11290 return false; 11291 } 11292 11293 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 11294 /// ISD::BUILD_VECTOR. 11295 static SDValue PerformBUILD_VECTORCombine(SDNode *N, 11296 TargetLowering::DAGCombinerInfo &DCI, 11297 const ARMSubtarget *Subtarget) { 11298 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 11299 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 11300 // into a pair of GPRs, which is fine when the value is used as a scalar, 11301 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 11302 SelectionDAG &DAG = DCI.DAG; 11303 if (N->getNumOperands() == 2) 11304 if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) 11305 return RV; 11306 11307 // Load i64 elements as f64 values so that type legalization does not split 11308 // them up into i32 values. 11309 EVT VT = N->getValueType(0); 11310 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 11311 return SDValue(); 11312 SDLoc dl(N); 11313 SmallVector<SDValue, 8> Ops; 11314 unsigned NumElts = VT.getVectorNumElements(); 11315 for (unsigned i = 0; i < NumElts; ++i) { 11316 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 11317 Ops.push_back(V); 11318 // Make the DAGCombiner fold the bitcast. 11319 DCI.AddToWorklist(V.getNode()); 11320 } 11321 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 11322 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); 11323 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 11324 } 11325 11326 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 
11327 static SDValue 11328 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 11329 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. 11330 // At that time, we may have inserted bitcasts from integer to float. 11331 // If these bitcasts have survived DAGCombine, change the lowering of this 11332 // BUILD_VECTOR in something more vector friendly, i.e., that does not 11333 // force to use floating point types. 11334 11335 // Make sure we can change the type of the vector. 11336 // This is possible iff: 11337 // 1. The vector is only used in a bitcast to a integer type. I.e., 11338 // 1.1. Vector is used only once. 11339 // 1.2. Use is a bit convert to an integer type. 11340 // 2. The size of its operands are 32-bits (64-bits are not legal). 11341 EVT VT = N->getValueType(0); 11342 EVT EltVT = VT.getVectorElementType(); 11343 11344 // Check 1.1. and 2. 11345 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) 11346 return SDValue(); 11347 11348 // By construction, the input type must be float. 11349 assert(EltVT == MVT::f32 && "Unexpected type!"); 11350 11351 // Check 1.2. 11352 SDNode *Use = *N->use_begin(); 11353 if (Use->getOpcode() != ISD::BITCAST || 11354 Use->getValueType(0).isFloatingPoint()) 11355 return SDValue(); 11356 11357 // Check profitability. 11358 // Model is, if more than half of the relevant operands are bitcast from 11359 // i32, turn the build_vector into a sequence of insert_vector_elt. 11360 // Relevant operands are everything that is not statically 11361 // (i.e., at compile time) bitcasted. 11362 unsigned NumOfBitCastedElts = 0; 11363 unsigned NumElts = VT.getVectorNumElements(); 11364 unsigned NumOfRelevantElts = NumElts; 11365 for (unsigned Idx = 0; Idx < NumElts; ++Idx) { 11366 SDValue Elt = N->getOperand(Idx); 11367 if (Elt->getOpcode() == ISD::BITCAST) { 11368 // Assume only bit cast to i32 will go away. 
11369 if (Elt->getOperand(0).getValueType() == MVT::i32) 11370 ++NumOfBitCastedElts; 11371 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) 11372 // Constants are statically casted, thus do not count them as 11373 // relevant operands. 11374 --NumOfRelevantElts; 11375 } 11376 11377 // Check if more than half of the elements require a non-free bitcast. 11378 if (NumOfBitCastedElts <= NumOfRelevantElts / 2) 11379 return SDValue(); 11380 11381 SelectionDAG &DAG = DCI.DAG; 11382 // Create the new vector type. 11383 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 11384 // Check if the type is legal. 11385 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11386 if (!TLI.isTypeLegal(VecVT)) 11387 return SDValue(); 11388 11389 // Combine: 11390 // ARMISD::BUILD_VECTOR E1, E2, ..., EN. 11391 // => BITCAST INSERT_VECTOR_ELT 11392 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), 11393 // (BITCAST EN), N. 11394 SDValue Vec = DAG.getUNDEF(VecVT); 11395 SDLoc dl(N); 11396 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { 11397 SDValue V = N->getOperand(Idx); 11398 if (V.isUndef()) 11399 continue; 11400 if (V.getOpcode() == ISD::BITCAST && 11401 V->getOperand(0).getValueType() == MVT::i32) 11402 // Fold obvious case. 11403 V = V.getOperand(0); 11404 else { 11405 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); 11406 // Make the DAGCombiner fold the bitcasts. 11407 DCI.AddToWorklist(V.getNode()); 11408 } 11409 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); 11410 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); 11411 } 11412 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); 11413 // Make the DAGCombiner fold the bitcasts. 11414 DCI.AddToWorklist(Vec.getNode()); 11415 return Vec; 11416 } 11417 11418 /// PerformInsertEltCombine - Target-specific dag combine xforms for 11419 /// ISD::INSERT_VECTOR_ELT. 
11420 static SDValue PerformInsertEltCombine(SDNode *N, 11421 TargetLowering::DAGCombinerInfo &DCI) { 11422 // Bitcast an i64 load inserted into a vector to f64. 11423 // Otherwise, the i64 value will be legalized to a pair of i32 values. 11424 EVT VT = N->getValueType(0); 11425 SDNode *Elt = N->getOperand(1).getNode(); 11426 if (VT.getVectorElementType() != MVT::i64 || 11427 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 11428 return SDValue(); 11429 11430 SelectionDAG &DAG = DCI.DAG; 11431 SDLoc dl(N); 11432 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 11433 VT.getVectorNumElements()); 11434 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 11435 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 11436 // Make the DAGCombiner fold the bitcasts. 11437 DCI.AddToWorklist(Vec.getNode()); 11438 DCI.AddToWorklist(V.getNode()); 11439 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 11440 Vec, V, N->getOperand(2)); 11441 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 11442 } 11443 11444 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 11445 /// ISD::VECTOR_SHUFFLE. 11446 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 11447 // The LLVM shufflevector instruction does not require the shuffle mask 11448 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 11449 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 11450 // operands do not match the mask length, they are extended by concatenating 11451 // them with undef vectors. That is probably the right thing for other 11452 // targets, but for NEON it is better to concatenate two double-register 11453 // size vector operands into a single quad-register size vector. 
Do that 11454 // transformation here: 11455 // shuffle(concat(v1, undef), concat(v2, undef)) -> 11456 // shuffle(concat(v1, v2), undef) 11457 SDValue Op0 = N->getOperand(0); 11458 SDValue Op1 = N->getOperand(1); 11459 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 11460 Op1.getOpcode() != ISD::CONCAT_VECTORS || 11461 Op0.getNumOperands() != 2 || 11462 Op1.getNumOperands() != 2) 11463 return SDValue(); 11464 SDValue Concat0Op1 = Op0.getOperand(1); 11465 SDValue Concat1Op1 = Op1.getOperand(1); 11466 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) 11467 return SDValue(); 11468 // Skip the transformation if any of the types are illegal. 11469 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11470 EVT VT = N->getValueType(0); 11471 if (!TLI.isTypeLegal(VT) || 11472 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 11473 !TLI.isTypeLegal(Concat1Op1.getValueType())) 11474 return SDValue(); 11475 11476 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, 11477 Op0.getOperand(0), Op1.getOperand(0)); 11478 // Translate the shuffle mask. 11479 SmallVector<int, 16> NewMask; 11480 unsigned NumElts = VT.getVectorNumElements(); 11481 unsigned HalfElts = NumElts/2; 11482 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 11483 for (unsigned n = 0; n < NumElts; ++n) { 11484 int MaskElt = SVN->getMaskElt(n); 11485 int NewElt = -1; 11486 if (MaskElt < (int)HalfElts) 11487 NewElt = MaskElt; 11488 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 11489 NewElt = HalfElts + MaskElt - NumElts; 11490 NewMask.push_back(NewElt); 11491 } 11492 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, 11493 DAG.getUNDEF(VT), NewMask); 11494 } 11495 11496 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, 11497 /// NEON load/store intrinsics, and generic vector load/stores, to merge 11498 /// base address updates. 11499 /// For generic load/stores, the memory type is assumed to be a vector. 
/// The caller is assumed to have checked legality.
static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
                            N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
  const bool isStore = N->getOpcode() == ISD::STORE;
  // Intrinsics and stores carry the address in operand 2; loads in operand 1.
  const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
  SDValue Addr = N->getOperand(AddrOpIdx);
  MemSDNode *MemN = cast<MemSDNode>(N);
  SDLoc dl(N);

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
         UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store.  Otherwise,
    // folding it would create a cycle.  We can avoid searching through Addr
    // as it's a predecessor to both.
    SmallPtrSet<const SDNode *, 32> Visited;
    SmallVector<const SDNode *, 16> Worklist;
    Visited.insert(Addr.getNode());
    Worklist.push_back(N);
    Worklist.push_back(User);
    if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
        SDNode::hasPredecessorHelper(User, Visited, Worklist))
      continue;

    // Find the new opcode for the updating load/store.
    bool isLoadOp = true;
    bool isLaneOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    if (isIntrinsic) {
      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
      switch (IntNo) {
      default: llvm_unreachable("unexpected intrinsic for Neon base update");
      case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; break;
      case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
        NumVecs = 2; break;
      case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
        NumVecs = 3; break;
      case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
        NumVecs = 4; break;
      case Intrinsic::arm_neon_vld2dup:
      case Intrinsic::arm_neon_vld3dup:
      case Intrinsic::arm_neon_vld4dup:
        // TODO: Support updating VLDxDUP nodes. For now, we just skip
        // combining base updates for such intrinsics.
        continue;
      case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
        NumVecs = 2; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
        NumVecs = 3; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
        NumVecs = 4; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
        NumVecs = 2; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
        NumVecs = 3; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
        NumVecs = 4; isLoadOp = false; break;
      case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
        NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
        NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
        NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
      }
    } else {
      isLaneOp = true;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode for Neon base update");
      case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
      case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
      case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
      case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
      case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
        NumVecs = 1; isLaneOp = false; break;
      case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
        NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
      }
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoadOp) {
      VecTy = N->getValueType(0);
    } else if (isIntrinsic) {
      VecTy = N->getOperand(AddrOpIdx+1).getValueType();
    } else {
      assert(isStore && "Node has to be a load, a store, or an intrinsic!");
      VecTy = N->getOperand(1).getValueType();
    }

    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
    // Lane operations only touch a single element per vector.
    if (isLaneOp)
      NumBytes /= VecTy.getVectorNumElements();

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
    if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
      // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
      // separate instructions that make it harder to use a non-constant update.
      continue;
    }

    // OK, we found an ADD we can fold into the base update.
    // Now, create a _UPD node, taking care of not breaking alignment.

    EVT AlignedVecTy = VecTy;
    unsigned Alignment = MemN->getAlignment();

    // If this is a less-than-standard-aligned load/store, change the type to
    // match the standard alignment.
    // The alignment is overlooked when selecting _UPD variants; and it's
    // easier to introduce bitcasts here than fix that.
    // There are 3 ways to get to this base-update combine:
    // - intrinsics: they are assumed to be properly aligned (to the standard
    //   alignment of the memory type), so we don't need to do anything.
    // - ARMISD::VLDx nodes: they are only generated from the aforementioned
    //   intrinsics, so, likewise, there's nothing to do.
    // - generic load/store instructions: the alignment is specified as an
    //   explicit operand, rather than implicitly as the standard alignment
    //   of the memory type (like the intrinsics).  We need to change the
    //   memory type to match the explicit alignment.  That way, we don't
    //   generate non-standard-aligned ARMISD::VLDx nodes.
    if (isa<LSBaseSDNode>(N)) {
      if (Alignment == 0)
        Alignment = 1;
      if (Alignment < VecTy.getScalarSizeInBits() / 8) {
        MVT EltTy = MVT::getIntegerVT(Alignment * 8);
        assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
        assert(!isLaneOp && "Unexpected generic load/store lane.");
        unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
        AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
      }
      // Don't set an explicit alignment on regular load/stores that we want
      // to transform to VLD/VST 1_UPD nodes.
      // This matches the behavior of regular load/stores, which only get an
      // explicit alignment if the MMO alignment is larger than the standard
      // alignment of the memory type.
      // Intrinsics, however, always get an explicit alignment, set to the
      // alignment of the MMO.
11652 Alignment = 1; 11653 } 11654 11655 // Create the new updating load/store node. 11656 // First, create an SDVTList for the new updating node's results. 11657 EVT Tys[6]; 11658 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0); 11659 unsigned n; 11660 for (n = 0; n < NumResultVecs; ++n) 11661 Tys[n] = AlignedVecTy; 11662 Tys[n++] = MVT::i32; 11663 Tys[n] = MVT::Other; 11664 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2)); 11665 11666 // Then, gather the new node's operands. 11667 SmallVector<SDValue, 8> Ops; 11668 Ops.push_back(N->getOperand(0)); // incoming chain 11669 Ops.push_back(N->getOperand(AddrOpIdx)); 11670 Ops.push_back(Inc); 11671 11672 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) { 11673 // Try to match the intrinsic's signature 11674 Ops.push_back(StN->getValue()); 11675 } else { 11676 // Loads (and of course intrinsics) match the intrinsics' signature, 11677 // so just add all but the alignment operand. 11678 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i) 11679 Ops.push_back(N->getOperand(i)); 11680 } 11681 11682 // For all node types, the alignment operand is always the last one. 11683 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32)); 11684 11685 // If this is a non-standard-aligned STORE, the penultimate operand is the 11686 // stored value. Bitcast it to the aligned type. 11687 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) { 11688 SDValue &StVal = Ops[Ops.size()-2]; 11689 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal); 11690 } 11691 11692 EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy; 11693 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT, 11694 MemN->getMemOperand()); 11695 11696 // Update the uses. 
11697 SmallVector<SDValue, 5> NewResults; 11698 for (unsigned i = 0; i < NumResultVecs; ++i) 11699 NewResults.push_back(SDValue(UpdN.getNode(), i)); 11700 11701 // If this is an non-standard-aligned LOAD, the first result is the loaded 11702 // value. Bitcast it to the expected result type. 11703 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) { 11704 SDValue &LdVal = NewResults[0]; 11705 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal); 11706 } 11707 11708 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 11709 DCI.CombineTo(N, NewResults); 11710 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 11711 11712 break; 11713 } 11714 return SDValue(); 11715 } 11716 11717 static SDValue PerformVLDCombine(SDNode *N, 11718 TargetLowering::DAGCombinerInfo &DCI) { 11719 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 11720 return SDValue(); 11721 11722 return CombineBaseUpdate(N, DCI); 11723 } 11724 11725 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 11726 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 11727 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 11728 /// return true. 11729 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 11730 SelectionDAG &DAG = DCI.DAG; 11731 EVT VT = N->getValueType(0); 11732 // vldN-dup instructions only support 64-bit vectors for N > 1. 11733 if (!VT.is64BitVector()) 11734 return false; 11735 11736 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 
11737 SDNode *VLD = N->getOperand(0).getNode(); 11738 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 11739 return false; 11740 unsigned NumVecs = 0; 11741 unsigned NewOpc = 0; 11742 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 11743 if (IntNo == Intrinsic::arm_neon_vld2lane) { 11744 NumVecs = 2; 11745 NewOpc = ARMISD::VLD2DUP; 11746 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 11747 NumVecs = 3; 11748 NewOpc = ARMISD::VLD3DUP; 11749 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 11750 NumVecs = 4; 11751 NewOpc = ARMISD::VLD4DUP; 11752 } else { 11753 return false; 11754 } 11755 11756 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 11757 // numbers match the load. 11758 unsigned VLDLaneNo = 11759 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 11760 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 11761 UI != UE; ++UI) { 11762 // Ignore uses of the chain result. 11763 if (UI.getUse().getResNo() == NumVecs) 11764 continue; 11765 SDNode *User = *UI; 11766 if (User->getOpcode() != ARMISD::VDUPLANE || 11767 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 11768 return false; 11769 } 11770 11771 // Create the vldN-dup node. 11772 EVT Tys[5]; 11773 unsigned n; 11774 for (n = 0; n < NumVecs; ++n) 11775 Tys[n] = VT; 11776 Tys[n] = MVT::Other; 11777 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); 11778 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 11779 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 11780 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, 11781 Ops, VLDMemInt->getMemoryVT(), 11782 VLDMemInt->getMemOperand()); 11783 11784 // Update the uses. 11785 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 11786 UI != UE; ++UI) { 11787 unsigned ResNo = UI.getUse().getResNo(); 11788 // Ignore uses of the chain result. 
11789 if (ResNo == NumVecs) 11790 continue; 11791 SDNode *User = *UI; 11792 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 11793 } 11794 11795 // Now the vldN-lane intrinsic is dead except for its chain result. 11796 // Update uses of the chain. 11797 std::vector<SDValue> VLDDupResults; 11798 for (unsigned n = 0; n < NumVecs; ++n) 11799 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 11800 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 11801 DCI.CombineTo(VLD, VLDDupResults); 11802 11803 return true; 11804 } 11805 11806 /// PerformVDUPLANECombine - Target-specific dag combine xforms for 11807 /// ARMISD::VDUPLANE. 11808 static SDValue PerformVDUPLANECombine(SDNode *N, 11809 TargetLowering::DAGCombinerInfo &DCI) { 11810 SDValue Op = N->getOperand(0); 11811 11812 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 11813 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 11814 if (CombineVLDDUP(N, DCI)) 11815 return SDValue(N, 0); 11816 11817 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 11818 // redundant. Ignore bit_converts for now; element sizes are checked below. 11819 while (Op.getOpcode() == ISD::BITCAST) 11820 Op = Op.getOperand(0); 11821 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 11822 return SDValue(); 11823 11824 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 11825 unsigned EltSize = Op.getScalarValueSizeInBits(); 11826 // The canonical VMOV for a zero vector uses a 32-bit element size. 
11827 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 11828 unsigned EltBits; 11829 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 11830 EltSize = 8; 11831 EVT VT = N->getValueType(0); 11832 if (EltSize > VT.getScalarSizeInBits()) 11833 return SDValue(); 11834 11835 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 11836 } 11837 11838 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP. 11839 static SDValue PerformVDUPCombine(SDNode *N, 11840 TargetLowering::DAGCombinerInfo &DCI) { 11841 SelectionDAG &DAG = DCI.DAG; 11842 SDValue Op = N->getOperand(0); 11843 11844 // Match VDUP(LOAD) -> VLD1DUP. 11845 // We match this pattern here rather than waiting for isel because the 11846 // transform is only legal for unindexed loads. 11847 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()); 11848 if (LD && Op.hasOneUse() && LD->isUnindexed() && 11849 LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) { 11850 SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1), 11851 DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) }; 11852 SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other); 11853 SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys, 11854 Ops, LD->getMemoryVT(), 11855 LD->getMemOperand()); 11856 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1)); 11857 return VLDDup; 11858 } 11859 11860 return SDValue(); 11861 } 11862 11863 static SDValue PerformLOADCombine(SDNode *N, 11864 TargetLowering::DAGCombinerInfo &DCI) { 11865 EVT VT = N->getValueType(0); 11866 11867 // If this is a legal vector load, try to combine it into a VLD1_UPD. 11868 if (ISD::isNormalLoad(N) && VT.isVector() && 11869 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) 11870 return CombineBaseUpdate(N, DCI); 11871 11872 return SDValue(); 11873 } 11874 11875 /// PerformSTORECombine - Target-specific dag combine xforms for 11876 /// ISD::STORE. 
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->isVolatile())
    return SDValue();

  // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
  // pack all of the elements in one place.  Next, store to memory in fewer
  // chunks.
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();
  if (St->isTruncatingStore() && VT.isVector()) {
    SelectionDAG &DAG = DCI.DAG;
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    EVT StVT = St->getMemoryVT();
    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromEltSz = VT.getScalarSizeInBits();
    unsigned ToEltSz = StVT.getScalarSizeInBits();

    // From, To sizes and ElemCount must be pow of two
    if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();

    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store
    // size.
    if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();

    unsigned SizeRatio = FromEltSz / ToEltSz;
    assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
                                     NumElems*SizeRatio);
    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDLoc DL(St);
    SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
    // Select the narrow (truncated) sub-elements; on big-endian targets they
    // are the *last* sub-element of each original element.
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i < NumElems; ++i)
      ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
                          ? (i + 1) * SizeRatio - 1
                          : i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT)) return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
                                DAG.getUNDEF(WideVec.getValueType()),
                                ShuffleVec);
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to mem.

    // Find the largest store unit
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
        StoreType = Tp;
    }
    // Didn't find a legal store type.
    if (!TLI.isTypeLegal(StoreType))
      return SDValue();

    // Bitcast the original vector into a vector of store-size units
    EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
            StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
                                        TLI.getPointerTy(DAG.getDataLayout()));
    SDValue BasePtr = St->getBasePtr();

    // Perform one or more big stores into memory.
    unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
    for (unsigned I = 0; I < E; I++) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(I, DL));
      SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
                                St->getPointerInfo(), St->getAlignment(),
                                St->getMemOperand()->getFlags());
      BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
                            Increment);
      Chains.push_back(Ch);
    }
    // Tie the partial stores together so the combined store stays ordered.
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (!ISD::isNormalStore(St))
    return SDValue();

  // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON
  // and ARM stores of arguments in the same cache line.
  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
      StVal.getNode()->hasOneUse()) {
    SelectionDAG &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
    SDValue BasePtr = St->getBasePtr();
    // Store the low word first, then the high word at offset 4; on
    // big-endian targets the VMOVDRR operands are swapped accordingly.
    SDValue NewST1 = DAG.getStore(
        St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
        BasePtr, St->getPointerInfo(), St->getAlignment(),
        St->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    return DAG.getStore(NewST1.getValue(0), DL,
                        StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
                        OffsetPtr, St->getPointerInfo(),
                        std::min(4U, St->getAlignment() / 2),
                        St->getMemOperand()->getFlags());
  }

  if (StVal.getValueType() == MVT::i64 &&
      StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

    // Bitcast an i64 store extracted from a vector to f64.
    // Otherwise, the i64 value will be legalized to a pair of i32 values.
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(StVal);
    SDValue IntVec = StVal.getOperand(0);
    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                   IntVec.getValueType().getVectorNumElements());
    SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
    SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                 Vec, StVal.getOperand(1));
    dl = SDLoc(N);
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
    // Make the DAGCombiner fold the bitcasts.
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(ExtElt.getNode());
    DCI.AddToWorklist(V.getNode());
    return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags(), St->getAAInfo());
  }

  // If this is a legal vector store, try to combine it into a VST1_UPD.
  if (ISD::isNormalStore(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
/// can replace combinations of VMUL and VCVT (floating-point to integer)
/// when the VMUL has a constant operand that is a power of 2.
12025 /// 12026 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 12027 /// vmul.f32 d16, d17, d16 12028 /// vcvt.s32.f32 d16, d16 12029 /// becomes: 12030 /// vcvt.s32.f32 d16, d16, #3 12031 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, 12032 const ARMSubtarget *Subtarget) { 12033 if (!Subtarget->hasNEON()) 12034 return SDValue(); 12035 12036 SDValue Op = N->getOperand(0); 12037 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || 12038 Op.getOpcode() != ISD::FMUL) 12039 return SDValue(); 12040 12041 SDValue ConstVec = Op->getOperand(1); 12042 if (!isa<BuildVectorSDNode>(ConstVec)) 12043 return SDValue(); 12044 12045 MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); 12046 uint32_t FloatBits = FloatTy.getSizeInBits(); 12047 MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); 12048 uint32_t IntBits = IntTy.getSizeInBits(); 12049 unsigned NumLanes = Op.getValueType().getVectorNumElements(); 12050 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) { 12051 // These instructions only exist converting from f32 to i32. We can handle 12052 // smaller integers by generating an extra truncate, but larger ones would 12053 // be lossy. We also can't handle more then 4 lanes, since these intructions 12054 // only support v2i32/v4i32 types. 12055 return SDValue(); 12056 } 12057 12058 BitVector UndefElements; 12059 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); 12060 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); 12061 if (C == -1 || C == 0 || C > 32) 12062 return SDValue(); 12063 12064 SDLoc dl(N); 12065 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 12066 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 12067 Intrinsic::arm_neon_vcvtfp2fxu; 12068 SDValue FixConv = DAG.getNode( 12069 ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? 
MVT::v2i32 : MVT::v4i32, 12070 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0), 12071 DAG.getConstant(C, dl, MVT::i32)); 12072 12073 if (IntBits < FloatBits) 12074 FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv); 12075 12076 return FixConv; 12077 } 12078 12079 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 12080 /// can replace combinations of VCVT (integer to floating-point) and VDIV 12081 /// when the VDIV has a constant operand that is a power of 2. 12082 /// 12083 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 12084 /// vcvt.f32.s32 d16, d16 12085 /// vdiv.f32 d16, d17, d16 12086 /// becomes: 12087 /// vcvt.f32.s32 d16, d16, #3 12088 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, 12089 const ARMSubtarget *Subtarget) { 12090 if (!Subtarget->hasNEON()) 12091 return SDValue(); 12092 12093 SDValue Op = N->getOperand(0); 12094 unsigned OpOpcode = Op.getNode()->getOpcode(); 12095 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() || 12096 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 12097 return SDValue(); 12098 12099 SDValue ConstVec = N->getOperand(1); 12100 if (!isa<BuildVectorSDNode>(ConstVec)) 12101 return SDValue(); 12102 12103 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); 12104 uint32_t FloatBits = FloatTy.getSizeInBits(); 12105 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); 12106 uint32_t IntBits = IntTy.getSizeInBits(); 12107 unsigned NumLanes = Op.getValueType().getVectorNumElements(); 12108 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) { 12109 // These instructions only exist converting from i32 to f32. We can handle 12110 // smaller integers by generating an extra extend, but larger ones would 12111 // be lossy. We also can't handle more then 4 lanes, since these intructions 12112 // only support v2i32/v4i32 types. 
12113 return SDValue(); 12114 } 12115 12116 BitVector UndefElements; 12117 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); 12118 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); 12119 if (C == -1 || C == 0 || C > 32) 12120 return SDValue(); 12121 12122 SDLoc dl(N); 12123 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 12124 SDValue ConvInput = Op.getOperand(0); 12125 if (IntBits < FloatBits) 12126 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, 12127 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, 12128 ConvInput); 12129 12130 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 12131 Intrinsic::arm_neon_vcvtfxu2fp; 12132 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, 12133 Op.getValueType(), 12134 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), 12135 ConvInput, DAG.getConstant(C, dl, MVT::i32)); 12136 } 12137 12138 /// Getvshiftimm - Check if this is a valid build_vector for the immediate 12139 /// operand of a vector shift operation, where all the elements of the 12140 /// build_vector must have the same constant integer value. 12141 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 12142 // Ignore bit_converts. 12143 while (Op.getOpcode() == ISD::BITCAST) 12144 Op = Op.getOperand(0); 12145 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 12146 APInt SplatBits, SplatUndef; 12147 unsigned SplatBitSize; 12148 bool HasAnyUndefs; 12149 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 12150 HasAnyUndefs, ElementBits) || 12151 SplatBitSize > ElementBits) 12152 return false; 12153 Cnt = SplatBits.getSExtValue(); 12154 return true; 12155 } 12156 12157 /// isVShiftLImm - Check if this is a valid build_vector for the immediate 12158 /// operand of a vector shift left operation. That value must be in the range: 12159 /// 0 <= Value < ElementBits for a left shift; or 12160 /// 0 <= Value <= ElementBits for a long left shift. 
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (! getVShiftImm(Op, ElementBits, Cnt))
    return false;
  // A "long" shift may shift out the whole element width.
  return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the value count must be negative. The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (! getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (!isIntrinsic)
    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
  // Intrinsics encode right shifts as negative counts; normalize to positive.
  if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
    Cnt = -Cnt;
    return true;
  }
  return false;
}

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    // First switch: validate the shift amount for each intrinsic (and pick
    // the opcode for the plain-shift cases, which can go either direction).
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHL;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
                     ARMISD::VSHRs : ARMISD::VSHRu);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    // Second switch: select the ARMISD opcode for the remaining intrinsics.
    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRs; break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRu; break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRN; break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLs; break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLu; break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsu; break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNs; break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNu; break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsu; break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNs; break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNu; break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsu; break;
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLI;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRI;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
    // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
    // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
    SDValue N1 = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      SDValue N0 = N->getOperand(0);
      if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
          DAG.MaskedValueIsZero(N0.getOperand(0),
                                APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
    }
  }

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
      SDLoc dl(N);
      return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                            ARMISD::VSHRs : ARMISD::VSHRu);
      SDLoc dl(N);
      return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
  }
  return SDValue();
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8-
  // and 16-bit vector elements.  NEON supports these directly.  They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType()) &&
        isa<ConstantSDNode>(Lane)) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        // Any-extend can use the zero-extending lane move.
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
    }
  }

  return SDValue();
}

/// Return the constant's APInt if V is a power-of-two constant, else null.
static const APInt *isPowerOf2Constant(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  if (!C)
    return nullptr;
  const APInt *CV = &C->getAPIntValue();
  return CV->isPowerOf2() ?
CV : nullptr; 12432 } 12433 12434 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { 12435 // If we have a CMOV, OR and AND combination such as: 12436 // if (x & CN) 12437 // y |= CM; 12438 // 12439 // And: 12440 // * CN is a single bit; 12441 // * All bits covered by CM are known zero in y 12442 // 12443 // Then we can convert this into a sequence of BFI instructions. This will 12444 // always be a win if CM is a single bit, will always be no worse than the 12445 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is 12446 // three bits (due to the extra IT instruction). 12447 12448 SDValue Op0 = CMOV->getOperand(0); 12449 SDValue Op1 = CMOV->getOperand(1); 12450 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); 12451 auto CC = CCNode->getAPIntValue().getLimitedValue(); 12452 SDValue CmpZ = CMOV->getOperand(4); 12453 12454 // The compare must be against zero. 12455 if (!isNullConstant(CmpZ->getOperand(1))) 12456 return SDValue(); 12457 12458 assert(CmpZ->getOpcode() == ARMISD::CMPZ); 12459 SDValue And = CmpZ->getOperand(0); 12460 if (And->getOpcode() != ISD::AND) 12461 return SDValue(); 12462 const APInt *AndC = isPowerOf2Constant(And->getOperand(1)); 12463 if (!AndC) 12464 return SDValue(); 12465 SDValue X = And->getOperand(0); 12466 12467 if (CC == ARMCC::EQ) { 12468 // We're performing an "equal to zero" compare. Swap the operands so we 12469 // canonicalize on a "not equal to zero" compare. 12470 std::swap(Op0, Op1); 12471 } else { 12472 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?"); 12473 } 12474 12475 if (Op1->getOpcode() != ISD::OR) 12476 return SDValue(); 12477 12478 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); 12479 if (!OrC) 12480 return SDValue(); 12481 SDValue Y = Op1->getOperand(0); 12482 12483 if (Op0 != Y) 12484 return SDValue(); 12485 12486 // Now, is it profitable to continue? 
12487 APInt OrCI = OrC->getAPIntValue(); 12488 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; 12489 if (OrCI.countPopulation() > Heuristic) 12490 return SDValue(); 12491 12492 // Lastly, can we determine that the bits defined by OrCI 12493 // are zero in Y? 12494 KnownBits Known; 12495 DAG.computeKnownBits(Y, Known); 12496 if ((OrCI & Known.Zero) != OrCI) 12497 return SDValue(); 12498 12499 // OK, we can do the combine. 12500 SDValue V = Y; 12501 SDLoc dl(X); 12502 EVT VT = X.getValueType(); 12503 unsigned BitInX = AndC->logBase2(); 12504 12505 if (BitInX != 0) { 12506 // We must shift X first. 12507 X = DAG.getNode(ISD::SRL, dl, VT, X, 12508 DAG.getConstant(BitInX, dl, VT)); 12509 } 12510 12511 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); 12512 BitInY < NumActiveBits; ++BitInY) { 12513 if (OrCI[BitInY] == 0) 12514 continue; 12515 APInt Mask(VT.getSizeInBits(), 0); 12516 Mask.setBit(BitInY); 12517 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, 12518 // Confusingly, the operand is an *inverted* mask. 12519 DAG.getConstant(~Mask, dl, VT)); 12520 } 12521 12522 return V; 12523 } 12524 12525 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. 12526 SDValue 12527 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { 12528 SDValue Cmp = N->getOperand(4); 12529 if (Cmp.getOpcode() != ARMISD::CMPZ) 12530 // Only looking at NE cases. 
12531 return SDValue(); 12532 12533 EVT VT = N->getValueType(0); 12534 SDLoc dl(N); 12535 SDValue LHS = Cmp.getOperand(0); 12536 SDValue RHS = Cmp.getOperand(1); 12537 SDValue Chain = N->getOperand(0); 12538 SDValue BB = N->getOperand(1); 12539 SDValue ARMcc = N->getOperand(2); 12540 ARMCC::CondCodes CC = 12541 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 12542 12543 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) 12544 // -> (brcond Chain BB CC CPSR Cmp) 12545 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && 12546 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && 12547 LHS->getOperand(0)->hasOneUse()) { 12548 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); 12549 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); 12550 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 12551 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 12552 if ((LHS00C && LHS00C->getZExtValue() == 0) && 12553 (LHS01C && LHS01C->getZExtValue() == 1) && 12554 (LHS1C && LHS1C->getZExtValue() == 1) && 12555 (RHSC && RHSC->getZExtValue() == 0)) { 12556 return DAG.getNode( 12557 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), 12558 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); 12559 } 12560 } 12561 12562 return SDValue(); 12563 } 12564 12565 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 12566 SDValue 12567 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 12568 SDValue Cmp = N->getOperand(4); 12569 if (Cmp.getOpcode() != ARMISD::CMPZ) 12570 // Only looking at EQ and NE cases. 
12571 return SDValue(); 12572 12573 EVT VT = N->getValueType(0); 12574 SDLoc dl(N); 12575 SDValue LHS = Cmp.getOperand(0); 12576 SDValue RHS = Cmp.getOperand(1); 12577 SDValue FalseVal = N->getOperand(0); 12578 SDValue TrueVal = N->getOperand(1); 12579 SDValue ARMcc = N->getOperand(2); 12580 ARMCC::CondCodes CC = 12581 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 12582 12583 // BFI is only available on V6T2+. 12584 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { 12585 SDValue R = PerformCMOVToBFICombine(N, DAG); 12586 if (R) 12587 return R; 12588 } 12589 12590 // Simplify 12591 // mov r1, r0 12592 // cmp r1, x 12593 // mov r0, y 12594 // moveq r0, x 12595 // to 12596 // cmp r0, x 12597 // movne r0, y 12598 // 12599 // mov r1, r0 12600 // cmp r1, x 12601 // mov r0, x 12602 // movne r0, y 12603 // to 12604 // cmp r0, x 12605 // movne r0, y 12606 /// FIXME: Turn this into a target neutral optimization? 12607 SDValue Res; 12608 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 12609 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 12610 N->getOperand(3), Cmp); 12611 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 12612 SDValue ARMcc; 12613 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 12614 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 12615 N->getOperand(3), NewCmp); 12616 } 12617 12618 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) 12619 // -> (cmov F T CC CPSR Cmp) 12620 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { 12621 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); 12622 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 12623 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 12624 if ((LHS0C && LHS0C->getZExtValue() == 0) && 12625 (LHS1C && LHS1C->getZExtValue() == 1) && 12626 (RHSC && RHSC->getZExtValue() == 0)) { 12627 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 12628 LHS->getOperand(2), 
LHS->getOperand(3), 12629 LHS->getOperand(4)); 12630 } 12631 } 12632 12633 if (!VT.isInteger()) 12634 return SDValue(); 12635 12636 // Materialize a boolean comparison for integers so we can avoid branching. 12637 if (isNullConstant(FalseVal)) { 12638 if (CC == ARMCC::EQ && isOneConstant(TrueVal)) { 12639 if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) { 12640 // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it 12641 // right 5 bits will make that 32 be 1, otherwise it will be 0. 12642 // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5 12643 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); 12644 Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub), 12645 DAG.getConstant(5, dl, MVT::i32)); 12646 } else { 12647 // CMOV 0, 1, ==, (CMPZ x, y) -> 12648 // (ADDCARRY (SUB x, y), t:0, t:1) 12649 // where t = (SUBCARRY 0, (SUB x, y), 0) 12650 // 12651 // The SUBCARRY computes 0 - (x - y) and this will give a borrow when 12652 // x != y. In other words, a carry C == 1 when x == y, C == 0 12653 // otherwise. 12654 // The final ADDCARRY computes 12655 // x - y + (0 - (x - y)) + C == C 12656 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); 12657 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 12658 SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub); 12659 // ISD::SUBCARRY returns a borrow but we want the carry here 12660 // actually. 12661 SDValue Carry = 12662 DAG.getNode(ISD::SUB, dl, MVT::i32, 12663 DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1)); 12664 Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry); 12665 } 12666 } else if (CC == ARMCC::NE && LHS != RHS && 12667 (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) { 12668 // This seems pointless but will allow us to combine it further below. 
12669 // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUB x, y), z, !=, (CMPZ x, y) 12670 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); 12671 Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc, 12672 N->getOperand(3), Cmp); 12673 } 12674 } else if (isNullConstant(TrueVal)) { 12675 if (CC == ARMCC::EQ && LHS != RHS && 12676 (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) { 12677 // This seems pointless but will allow us to combine it further below 12678 // Note that we change == for != as this is the dual for the case above. 12679 // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUB x, y), z, !=, (CMPZ x, y) 12680 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS); 12681 Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal, 12682 DAG.getConstant(ARMCC::NE, dl, MVT::i32), 12683 N->getOperand(3), Cmp); 12684 } 12685 } 12686 12687 // On Thumb1, the DAG above may be further combined if z is a power of 2 12688 // (z == 2 ^ K). 12689 // CMOV (SUB x, y), z, !=, (CMPZ x, y) -> 12690 // merge t3, t4 12691 // where t1 = (SUBCARRY (SUB x, y), z, 0) 12692 // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1) 12693 // t3 = if K != 0 then (SHL t2:0, K) else t2:0 12694 // t4 = (SUB 1, t2:1) [ we want a carry, not a borrow ] 12695 const APInt *TrueConst; 12696 if (Subtarget->isThumb1Only() && CC == ARMCC::NE && 12697 (FalseVal.getOpcode() == ISD::SUB) && (FalseVal.getOperand(0) == LHS) && 12698 (FalseVal.getOperand(1) == RHS) && 12699 (TrueConst = isPowerOf2Constant(TrueVal))) { 12700 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 12701 unsigned ShiftAmount = TrueConst->logBase2(); 12702 if (ShiftAmount) 12703 TrueVal = DAG.getConstant(1, dl, VT); 12704 SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal); 12705 Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1)); 12706 // Make it a carry, not a borrow. 
12707 SDValue Carry = DAG.getNode( 12708 ISD::SUB, dl, VT, DAG.getConstant(1, dl, MVT::i32), Res.getValue(1)); 12709 Res = DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Res, Carry); 12710 12711 if (ShiftAmount) 12712 Res = DAG.getNode(ISD::SHL, dl, VT, Res, 12713 DAG.getConstant(ShiftAmount, dl, MVT::i32)); 12714 } 12715 12716 if (Res.getNode()) { 12717 KnownBits Known; 12718 DAG.computeKnownBits(SDValue(N,0), Known); 12719 // Capture demanded bits information that would be otherwise lost. 12720 if (Known.Zero == 0xfffffffe) 12721 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 12722 DAG.getValueType(MVT::i1)); 12723 else if (Known.Zero == 0xffffff00) 12724 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 12725 DAG.getValueType(MVT::i8)); 12726 else if (Known.Zero == 0xffff0000) 12727 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 12728 DAG.getValueType(MVT::i16)); 12729 } 12730 12731 return Res; 12732 } 12733 12734 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 12735 DAGCombinerInfo &DCI) const { 12736 switch (N->getOpcode()) { 12737 default: break; 12738 case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget); 12739 case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget); 12740 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 12741 case ISD::SUB: return PerformSUBCombine(N, DCI); 12742 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 12743 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 12744 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 12745 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 12746 case ARMISD::ADDC: 12747 case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget); 12748 case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget); 12749 case ARMISD::BFI: return PerformBFICombine(N, DCI); 12750 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); 12751 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 
12752 case ISD::STORE: return PerformSTORECombine(N, DCI); 12753 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); 12754 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 12755 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 12756 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 12757 case ARMISD::VDUP: return PerformVDUPCombine(N, DCI); 12758 case ISD::FP_TO_SINT: 12759 case ISD::FP_TO_UINT: 12760 return PerformVCVTCombine(N, DCI.DAG, Subtarget); 12761 case ISD::FDIV: 12762 return PerformVDIVCombine(N, DCI.DAG, Subtarget); 12763 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 12764 case ISD::SHL: 12765 case ISD::SRA: 12766 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 12767 case ISD::SIGN_EXTEND: 12768 case ISD::ZERO_EXTEND: 12769 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 12770 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 12771 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); 12772 case ISD::LOAD: return PerformLOADCombine(N, DCI); 12773 case ARMISD::VLD1DUP: 12774 case ARMISD::VLD2DUP: 12775 case ARMISD::VLD3DUP: 12776 case ARMISD::VLD4DUP: 12777 return PerformVLDCombine(N, DCI); 12778 case ARMISD::BUILD_VECTOR: 12779 return PerformARMBUILD_VECTORCombine(N, DCI); 12780 case ARMISD::SMULWB: { 12781 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 12782 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); 12783 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) 12784 return SDValue(); 12785 break; 12786 } 12787 case ARMISD::SMULWT: { 12788 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 12789 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); 12790 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)) 12791 return SDValue(); 12792 break; 12793 } 12794 case ARMISD::SMLALBB: { 12795 unsigned BitWidth = 
N->getValueType(0).getSizeInBits(); 12796 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16); 12797 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || 12798 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) 12799 return SDValue(); 12800 break; 12801 } 12802 case ARMISD::SMLALBT: { 12803 unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits(); 12804 APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); 12805 unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits(); 12806 APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); 12807 if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) || 12808 (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI))) 12809 return SDValue(); 12810 break; 12811 } 12812 case ARMISD::SMLALTB: { 12813 unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits(); 12814 APInt HighMask = APInt::getHighBitsSet(HighWidth, 16); 12815 unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits(); 12816 APInt LowMask = APInt::getLowBitsSet(LowWidth, 16); 12817 if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) || 12818 (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI))) 12819 return SDValue(); 12820 break; 12821 } 12822 case ARMISD::SMLALTT: { 12823 unsigned BitWidth = N->getValueType(0).getSizeInBits(); 12824 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16); 12825 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) || 12826 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))) 12827 return SDValue(); 12828 break; 12829 } 12830 case ISD::INTRINSIC_VOID: 12831 case ISD::INTRINSIC_W_CHAIN: 12832 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 12833 case Intrinsic::arm_neon_vld1: 12834 case Intrinsic::arm_neon_vld1x2: 12835 case Intrinsic::arm_neon_vld1x3: 12836 case Intrinsic::arm_neon_vld1x4: 12837 case Intrinsic::arm_neon_vld2: 12838 case Intrinsic::arm_neon_vld3: 12839 case Intrinsic::arm_neon_vld4: 12840 case 
Intrinsic::arm_neon_vld2lane: 12841 case Intrinsic::arm_neon_vld3lane: 12842 case Intrinsic::arm_neon_vld4lane: 12843 case Intrinsic::arm_neon_vld2dup: 12844 case Intrinsic::arm_neon_vld3dup: 12845 case Intrinsic::arm_neon_vld4dup: 12846 case Intrinsic::arm_neon_vst1: 12847 case Intrinsic::arm_neon_vst1x2: 12848 case Intrinsic::arm_neon_vst1x3: 12849 case Intrinsic::arm_neon_vst1x4: 12850 case Intrinsic::arm_neon_vst2: 12851 case Intrinsic::arm_neon_vst3: 12852 case Intrinsic::arm_neon_vst4: 12853 case Intrinsic::arm_neon_vst2lane: 12854 case Intrinsic::arm_neon_vst3lane: 12855 case Intrinsic::arm_neon_vst4lane: 12856 return PerformVLDCombine(N, DCI); 12857 default: break; 12858 } 12859 break; 12860 } 12861 return SDValue(); 12862 } 12863 12864 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 12865 EVT VT) const { 12866 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 12867 } 12868 12869 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 12870 unsigned, 12871 unsigned, 12872 bool *Fast) const { 12873 // Depends what it gets converted into if the type is weird. 12874 if (!VT.isSimple()) 12875 return false; 12876 12877 // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus 12878 bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); 12879 12880 switch (VT.getSimpleVT().SimpleTy) { 12881 default: 12882 return false; 12883 case MVT::i8: 12884 case MVT::i16: 12885 case MVT::i32: { 12886 // Unaligned access can use (for example) LRDB, LRDH, LDR 12887 if (AllowsUnaligned) { 12888 if (Fast) 12889 *Fast = Subtarget->hasV7Ops(); 12890 return true; 12891 } 12892 return false; 12893 } 12894 case MVT::f64: 12895 case MVT::v2f64: { 12896 // For any little-endian targets with neon, we can support unaligned ld/st 12897 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. 
12898 // A big-endian target may also explicitly support unaligned accesses 12899 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) { 12900 if (Fast) 12901 *Fast = true; 12902 return true; 12903 } 12904 return false; 12905 } 12906 } 12907 } 12908 12909 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, 12910 unsigned AlignCheck) { 12911 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) && 12912 (DstAlign == 0 || DstAlign % AlignCheck == 0)); 12913 } 12914 12915 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, 12916 unsigned DstAlign, unsigned SrcAlign, 12917 bool IsMemset, bool ZeroMemset, 12918 bool MemcpyStrSrc, 12919 MachineFunction &MF) const { 12920 const Function &F = MF.getFunction(); 12921 12922 // See if we can use NEON instructions for this... 12923 if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() && 12924 !F.hasFnAttribute(Attribute::NoImplicitFloat)) { 12925 bool Fast; 12926 if (Size >= 16 && 12927 (memOpAlign(SrcAlign, DstAlign, 16) || 12928 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) { 12929 return MVT::v2f64; 12930 } else if (Size >= 8 && 12931 (memOpAlign(SrcAlign, DstAlign, 8) || 12932 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) && 12933 Fast))) { 12934 return MVT::f64; 12935 } 12936 } 12937 12938 // Let the target-independent logic figure it out. 12939 return MVT::Other; 12940 } 12941 12942 // 64-bit integers are split into their high and low parts and held in two 12943 // different registers, so the trunc is free since the low register can just 12944 // be used. 
bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  // Only an i64 -> i32 truncation is free (the low half register is reused).
  if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  // EVT flavor of the Type* overload above: scalar integer i64 -> i32 only.
  if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
      !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

/// Zero extension is free when it can be folded into the load that produced
/// the value being extended.
bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
    return true;
  }

  return false;
}

bool ARMTargetLowering::isFNegFree(EVT VT) const {
  if (!VT.isSimple())
    return false;

  // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
  // negate values directly (fneg is free). So, we don't want to let the DAG
  // combiner rewrite fneg into xors and some other instructions.  For f16 and
  // FullFP16 argument passing, some bitcast nodes may be introduced,
  // triggering this DAG combine rewrite, so we are avoiding that with this.
  switch (VT.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::f16:
    return Subtarget->hasFullFP16();
  }

  return false;
}

bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT VT = ExtVal.getValueType();

  if (!isTypeLegal(VT))
    return false;

  // Don't create a loadext if we can fold the extension into a wide/long
  // instruction.
  // If there's more than one user instruction, the loadext is desirable no
  // matter what.  There can be two uses by the same instruction.
  if (ExtVal->use_empty() ||
      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
    return true;

  // A single user that is one of these opcodes can absorb the extension
  // (e.g. as a widening/long NEON operation), so keep the extend separate.
  SDNode *U = *ExtVal->use_begin();
  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
    return false;

  return true;
}

bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

/// Cost of using the scale in an addressing mode: 0 if free, 1 if it costs a
/// cycle (negative offsets with FPAO), -1 if the mode is not legal at all.
int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
                                            const AddrMode &AM, Type *Ty,
                                            unsigned AS) const {
  if (isLegalAddressingMode(DL, AM, Ty, AS)) {
    if (Subtarget->hasFPAO())
      return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
    return 0;
  }
  return -1;
}

/// Thumb1 load/store offsets are unsigned 5-bit immediates scaled by the
/// access size, so the offset must be non-negative, a multiple of the access
/// size, and fit in 5 bits after scaling.
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    // Scale == 1;
    break;
  case MVT::i16:
    // Scale == 2;
    Scale = 2;
    break;
  case MVT::i32:
    // Scale == 4;
    Scale = 4;
    break;
  }

  if ((V & (Scale - 1)) != 0)
    return false;
  V /= Scale;
  return V == (V & ((1LL << 5) - 1));
}

/// Thumb2 offsets: integers take +imm12 or -imm8; VFP types take a word-scaled
/// 8-bit immediate (same as ARM mode).
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  bool isNeg = false;
  if (V < 0) {
    isNeg = true;
    V = - V;
  }

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // + imm12 or - imm8
    if (isNeg)
      return V == (V & ((1LL << 8) - 1));
    return V == (V & ((1LL << 12) - 1));
  case MVT::f32:
  case MVT::f64:
    // Same as ARM mode. FIXME: NEON?
    if (!Subtarget->hasVFP2())
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = - V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return V == (V & ((1LL << 12) - 1));
  case MVT::i16:
    // +- imm8
    return V == (V & ((1LL << 8) - 1));
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2()) // FIXME: NEON?
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}

bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // FIXME: What are we trying to model here? ldrd doesn't have an r + r
    // version in Thumb mode.
    // r + r
    if (Scale == 1)
      return true;
    // r * 2 (this can be lowered to r + r).
    if (!AM.HasBaseReg && Scale == 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because arm allows folding a scale into many arithmetic
    // operations.  This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}

bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  const int Scale = AM.Scale;

  // Negative scales are not supported in Thumb1.
  if (Scale < 0)
    return false;

  // Thumb1 addressing modes do not support register scaling excepting the
  // following cases:
  // 1. Scale == 1 means no scaling.
  // 2. Scale == 2 this can be lowered to r + r if there is no base register.
  return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
  EVT VT = getValueType(DL, Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb1Only())
      return isLegalT1ScaledAddressingMode(AM, VT);

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    // ARM mode.
    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r +/- r
      if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
        return true;
      // r * 2 (this can be lowered to r + r).
      if (!AM.HasBaseReg && Scale == 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations.  This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // Thumb2 and ARM modes can use cmn for negative immediates.
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
  // Thumb1 doesn't have cmn, and only 8-bit immediates.
  return Imm >= 0 && Imm <= 255;
}

/// isLegalAddImmediate - Return true if the specified immediate is a legal add
/// *or sub* immediate, that is the target has add or sub instructions which can
/// add a register with the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Same encoding for add/sub, just flip the sign.
  int64_t AbsImm = std::abs(Imm);
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(AbsImm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has 8-bit unsigned immediate.
  return AbsImm >= 0 && AbsImm <= 255;
}

/// Decompose an ADD/SUB pointer node into base + offset for a pre/post
/// indexed ARM-mode load/store; sets \p isInc to whether the offset is added.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        // Negative constant folded into an ADD: treat as decrement.
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        // Negative constant folded into an ADD: treat as decrement.
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      // If one operand is a shift, it must be the offset (AM2 supports a
      // shifted offset register); the other operand is the base.
      ARM_AM::ShiftOpc ShOpcVal=
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}

/// Thumb2 variant of getARMIndexedAddressParts: only constant offsets that
/// fit in 8 bits are supported.
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  // Thumb-1 is handled separately for post-indexing only (see
  // getPostIndexedAddressParts); no pre-indexed forms are matched for it here.
  if (Subtarget->isThumb1Only())
    return false;

  // Pull the memory VT, the address operand and the sign-extension flag out
  // of the load/store node; any other node kind cannot be pre-indexed.
  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  // Delegate the actual base/offset decomposition to the mode-specific
  // helper; it also decides whether the update is an increment or decrement.
  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  // Pull the memory VT, the address operand and the extension kind out of
  // the load/store node N.  Op is the ADD/SUB producing the updated address.
  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false, isNonExt;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    isNonExt = !ST->isTruncatingStore();
  } else
    return false;

  if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
    // must be non-extending/truncating, i32, with an offset of 4.
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
    if (Op->getOpcode() != ISD::ADD || !isNonExt)
      return false;
    auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
    if (!RHS || RHS->getZExtValue() != 4)
      return false;

    Offset = Op->getOperand(1);
    Base = Op->getOperand(0);
    AM = ISD::POST_INC;
    return true;
  }

  // ARM / Thumb-2: reuse the same decomposition helpers as the pre-indexed
  // path, but applied to the address-update node Op.
  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}

/// Compute known-zero/known-one bits for ARM-specific nodes (carry-producing
/// arithmetic, CMOV, exclusive loads, BFI) so generic DAG combines can see
/// through them.
void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // Special cases when we convert a carry to a boolean.
    if (Op.getResNo() == 0) {
      SDValue LHS = Op.getOperand(0);
      SDValue RHS = Op.getOperand(1);
      // (ADDE 0, 0, C) will give us a single bit.
      if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
          isNullConstant(RHS)) {
        Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
        return;
      }
    }
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
    if (Known.isUnknown())
      return;

    KnownBits KnownRHS;
    DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
    // Only bits agreed upon by both possible results survive.
    Known.Zero &= KnownRHS.Zero;
    Known.One &= KnownRHS.One;
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::arm_ldaex:
    case Intrinsic::arm_ldrex: {
      // An exclusive load zero-extends from its memory width, so the high
      // bits above MemBits are known zero.
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
    // NOTE(review): there is no break here, but every path in the inner
    // switch returns, so control never actually falls through into BFI.
  }
  case ARMISD::BFI: {
    // Conservatively, we can recurse down the first operand
    // and just mask out all affected bits.
    DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);

    // The operand to BFI is already a mask suitable for removing the bits it
    // sets.
    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
    const APInt &Mask = CI->getAPIntValue();
    Known.Zero &= Mask;
    Known.One &= Mask;
    return;
  }
  }
}

/// Try to replace the constant operand of an AND with a different constant
/// that is cheaper to materialize on ARM/Thumb, given which bits are actually
/// demanded.  Returns true if a replacement was committed via TLO.
bool
ARMTargetLowering::targetShrinkDemandedConstant(SDValue Op,
                                                const APInt &DemandedAPInt,
                                                TargetLoweringOpt &TLO) const {
  // Delay optimization, so we don't have to deal with illegal types, or block
  // optimizations.
  if (!TLO.LegalOps)
    return false;

  // Only optimize AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  assert(VT == MVT::i32 && "Unexpected integer type");

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  unsigned Mask = C->getZExtValue();

  // ShrunkMask keeps only demanded bits of the mask; ExpandedMask force-sets
  // every non-demanded bit.  Any mask between the two is equivalent on the
  // demanded bits.
  unsigned Demanded = DemandedAPInt.getZExtValue();
  unsigned ShrunkMask = Mask & Demanded;
  unsigned ExpandedMask = Mask | ~Demanded;

  // If the mask is all zeros, let the target-independent code replace the
  // result with zero.
  if (ShrunkMask == 0)
    return false;

  // If the mask is all ones, erase the AND. (Currently, the target-independent
  // code won't do this, so we have to do it explicitly to avoid an infinite
  // loop in obscure cases.)
  if (ExpandedMask == ~0U)
    return TLO.CombineTo(Op, Op.getOperand(0));

  // A candidate mask is legal iff it lies between ShrunkMask and ExpandedMask
  // (superset of the former, subset of the latter).
  auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
    return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
  };
  auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // Prefer uxtb mask.
  if (IsLegalMask(0xFF))
    return UseMask(0xFF);

  // Prefer uxth mask.
  if (IsLegalMask(0xFFFF))
    return UseMask(0xFFFF);

  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if (ShrunkMask < 256)
    return UseMask(ShrunkMask);

  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
    return UseMask(ExpandedMask);

  // Potential improvements:
  //
  // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
  // We could try to prefer Thumb1 immediates which can be lowered to a
  // two-instruction sequence.
  // We could try to recognize more legal ARM/Thumb2 immediates here.

  return false;
}


//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

/// Pattern-match the single inline-asm statement "rev $0, $1" (with an
/// "=l,l" constraint string) and lower it to the bswap intrinsic on V6+.
bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}

const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasVFP2())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
     (ConstraintVT.getSizeInBits() == 64 ||
      ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    case 'h': return C_RegisterClass;
    case 'x': return C_RegisterClass;
    case 't': return C_RegisterClass;
    case 'j': return C_Other; // Constant for movw.
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as an 'r' memory constraint.
    case 'Q': return C_Memory;
    }
  } else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    default: break;
    // All 'U+' constraints are addresses.
    case 'U': return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
    // If we don't have a value, we can't do a match,
    // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    // 'l' (low registers) is only meaningful for integers; on Thumb it names
    // a specific register class, elsewhere it is just a register.
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    // 'w' (VFP registers) only matches floating-point operands.
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

using RCPair = std::pair<unsigned, const TargetRegisterClass *>;

/// Map a single-letter register constraint (or "{cc}") plus a value type to
/// the ARM register class that satisfies it.
RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      // VFP/NEON register of the width matching VT.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      // Like 'w' but restricted to the lower half of the register file.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      // VFP2-accessible registers.
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32 || VT == MVT::i32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_VFP2RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_VFP2RegClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'j':
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (ConstraintLetter) {
      case 'j':
        // Constant suitable for movw, must be between 0 and
        // 65535.
        if (Subtarget->hasV6T2Ops())
          if (CVal >= 0 && CVal <= 65535)
            break;
        return;
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -255 and -1, for negated ADD
          // immediates. This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions. It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095. It is not clear
          // what this constraint is intended for. Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value. Exclude
          // zero to match GCC. This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          // NOTE(review): the comparison below uses a strict `< 7`, so +7 is
          // rejected despite the comment saying "between -7 and 7" — confirm
          // against GCC's 'L' constraint before changing either.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb()) {  // FIXME thumb2
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

/// Pick the signed/unsigned divmod libcall matching the node's opcode and
/// the given simple value type.
static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemLibcall");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  RTLIB::Libcall LC;
  switch (SVT) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  }
  return LC;
}

/// Build the argument list for a divmod libcall from the node's operands,
/// with sign/zero-extension flags matching the signedness of the operation.
static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemArgList");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    EVT ArgVT = N->getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*Context);
    Entry.Node = N->getOperand(i);
    Entry.Ty = ArgTy;
    Entry.IsSExt = isSigned;
    Entry.IsZExt = !isSigned;
    Args.push_back(Entry);
  }
  // The Windows divmod helpers take (divisor, dividend), the reverse of the
  // node's operand order.
  if (Subtarget->isTargetWindows() && Args.size() >= 2)
    std::swap(Args[0], Args[1]);
  return Args;
}

SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
          Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
          Subtarget->isTargetWindows()) &&
         "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  SDLoc dl(Op);

  // If the target has hardware divide, use divide + multiply + subtract:
  //     div = a / b
  //     rem = a - b * div
  //     return {div, rem}
  // This should be lowered into UDIV/SDIV + MLS later on.
  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (hasDivide && Op->getValueType(0).isSimple() &&
      Op->getSimpleValueType(0) == MVT::i32) {
    unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
    const SDValue Dividend = Op->getOperand(0);
    const SDValue Divisor = Op->getOperand(1);
    SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
    SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);

    SDValue Values[2] = {Div, Rem};
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
  }

  // Otherwise emit a call to the divmod runtime helper.
  RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
                                       VT.getSimpleVT().SimpleTy);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
                                                    DAG.getContext(),
                                                    Subtarget);

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = StructType::get(Ty, Ty);

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

// Lowers REM using divmod helpers
// see RTABI section 4.2/4.3
SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
  // Build return types (div and rem)
  std::vector<Type*> RetTyParams;
  Type *RetTyElement;

  switch (N->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
  case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
  case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
  case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
  }

  // The helper returns {div, rem} packed in a two-element struct.
  RetTyParams.push_back(RetTyElement);
  RetTyParams.push_back(RetTyElement);
  ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
  Type *RetTy = StructType::get(*DAG.getContext(), ret);

  RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
                                             SimpleTy);
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
                                                    Subtarget);
  bool isSigned = N->getOpcode() == ISD::SREM;
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, N, InChain);

  // Lower call
  CallLoweringInfo CLI(DAG);
  CLI.setChain(InChain)
     .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
     .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // Return second (rem) result operand (first contains div)
  SDNode *ResNode = CallResult.first.getNode();
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
  return ResNode->getOperand(1);
}

SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
  SDLoc DL(Op);

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);

  // Without stack probing, just adjust (and optionally align) SP directly.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          "no-stack-arg-probe")) {
    unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
    SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
    Chain = SP.getValue(1);
    SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
    if (Align)
      SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align, DL, MVT::i32));
    Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
    SDValue Ops[2] = { SP, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // __chkstk takes the allocation size in words in R4 and adjusts SP itself.
  SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
                              DAG.getConstant(2, DL, MVT::i32));

  SDValue Flag;
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);

  SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
  Chain = NewSP.getValue(1);

  SDValue Ops[2] = { NewSP, Chain };
  return DAG.getMergeValues(Ops, DL);
}

SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  // Only reached for f32 -> f64 on single-precision-only FPUs; use a libcall.
  assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
         "Unexpected type for custom-lowering FP_EXTEND");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
                     SDLoc(Op)).first;
}

SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  // Only reached for f64 -> f32 on single-precision-only FPUs; use a libcall.
  assert(Op.getOperand(0).getValueType() == MVT::f64 &&
         Subtarget->isFPOnlySP() &&
         "Unexpected type for custom-lowering FP_ROUND");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
                     SDLoc(Op)).first;
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;

  // there can be 1's on either or both "outsides", all the "inside"
  // bits must be 0's
  return isShiftedMask_32(~v);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f16 && Subtarget->hasFullFP16())
    return ARM_AM::getFP16Imm(Imm) != -1;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64 && !Subtarget->isFPOnlySP())
    return ARM_AM::getFP64Imm(Imm) != -1;
  return false;
}

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vld2dup:
  case Intrinsic::arm_neon_vld3dup:
  case Intrinsic::arm_neon_vld4dup: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // The alignment is always passed as the last argument of these intrinsics.
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vld1x2:
  case Intrinsic::arm_neon_vld1x3:
  case Intrinsic::arm_neon_vld1x4: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    // Unlike the vldN forms above, vld1xN takes the pointer last and carries
    // no alignment operand.
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align = 0;
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    // Sum the sizes of the vector arguments; the trailing non-vector
    // argument is the alignment.
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_neon_vst1x2:
  case Intrinsic::arm_neon_vst1x3:
  case Intrinsic::arm_neon_vst1x4: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    // vst1xN has no alignment operand.
    Info.align = 0;
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
    // Exclusive loads are modeled as volatile so they are never reordered
    // or folded away.
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    // strex/stlex take (value, pointer); the pointer is operand 1.
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlexd:
  case Intrinsic::arm_strexd:
    // 64-bit exclusive store: (lo, hi, pointer) with the pointer as operand 2.
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;

  case Intrinsic::arm_ldaexd:
  case Intrinsic::arm_ldrexd:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;

  default:
    break;
  }

  return false;
}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  // Any constant that fits in 32 bits can be materialized with a short
  // mov/movw-movt sequence; wider ones are better left in the constant pool.
  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  // Extracting either half of a register pair (D from Q) is free.
  return (Index == 0 || Index == ResVT.getVectorNumElements());
}

/// Emit a data memory barrier for the given domain, falling back to the
/// ARMv6 CP15 barrier (mcr p15, 0, r0, c7, c10, 5) when DMB is unavailable.
Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
                                        ARM_MB::MemBOpt Domain) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  // First, if the target has no DMB, see what fallback we can use.
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
      Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
      Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0), Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
      return Builder.CreateCall(MCR, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
    Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateCall(DMB, CDomain);
  }
}

// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!Inst->hasAtomicStore())
      return nullptr; // Nothing to do
    LLVM_FALLTHROUGH;
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    else
      return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD)
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}

// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}

TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  bool HasAtomicCmpXchg =
      !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
    return AtomicExpansionKind::LLSC;
  return AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}

// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

/// Return true (with Cost set to 0) when a store of an extracted vector lane
/// can be folded into the store itself: requires NEON, a non-FP vector, a
/// compile-time-constant lane index, and a D- or Q-sized (64/128-bit) vector.
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store extract of a vector type, this is
  // better to leave at float as we have more freedom in the addressing mode for
  // those.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
  // We can do a store + vector extract on any vector that fits perfectly in a D
  // or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}

// CLZ/CTZ are single instructions from v6T2 onwards, so speculation is cheap.
bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

/// Emit a load-linked (ldrex/ldaex) of Addr, using the acquire variant when
/// the ordering requires it. 64-bit loads use ldrexd/ldaexd, whose {i32,i32}
/// result is recombined into a single i64.
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    // The intrinsic's result halves are in little-endian order; swap for BE.
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);

  // ldrex always returns i32; truncate back down to the loaded type.
  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

/// Clear the exclusive monitor (clrex) on the no-store path of a cmpxchg so
/// a dangling reservation cannot leak out. Pre-v7 targets have no clrex.
void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}

/// Emit a store-conditional (strex/stlex) of Val to Addr, using the release
/// variant when the ordering requires it. Returns the i32 success flag.
Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    // Match the register-half order expected by strexd on big-endian.
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  // strex takes an i32 value; widen narrower stores to match.
  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}


bool ARMTargetLowering::alignLoopsWithOptSize() const {
  return Subtarget->isMClass();
}

/// A helper function for determining the number of interleaved accesses we
/// will generate when lowering accesses of the given type.
unsigned
ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
                                             const DataLayout &DL) const {
  // One access per 128 bits, rounded up.
  return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
}

bool ARMTargetLowering::isLegalInterleavedAccessType(
    VectorType *VecTy, const DataLayout &DL) const {

  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());

  // Ensure the vector doesn't have f16 elements. Even though we could do an
  // i16 vldN, we can't hold the f16 vectors and will end up converting via
  // f32.
  if (VecTy->getElementType()->isHalfTy())
    return false;

  // Ensure the number of vector elements is greater than 1.
  if (VecTy->getNumElements() < 2)
    return false;

  // Ensure the element type is legal.
  if (ElSize != 8 && ElSize != 16 && ElSize != 32)
    return false;

  // Ensure the total vector size is 64 or a multiple of 128. Types larger than
  // 128 will be split into multiple interleaved accesses.
  return VecSize == 64 || VecSize % 128 == 0;
}

/// Lower an interleaved load into a vldN intrinsic.
///
/// E.g.
/// Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  VectorType *VecTy = Shuffles[0]->getType();
  Type *EltTy = VecTy->getVectorElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL))
    return false;

  unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);

  // A pointer vector can not be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy =
        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());

  IRBuilder<> Builder(LI);

  // The base address of the load.
  Value *BaseAddr = LI->getPointerOperand();

  if (NumLoads > 1) {
    // If we're going to generate more than one load, reset the sub-vector type
    // to something legal.
    VecTy = VectorType::get(VecTy->getVectorElementType(),
                            VecTy->getVectorNumElements() / NumLoads);

    // We will compute the pointer operand of each load from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, VecTy->getVectorElementType()->getPointerTo(
                      LI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");

  Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
  Type *Tys[] = {VecTy, Int8Ptr};
  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);

  // Holds sub-vectors extracted from the load intrinsic return values. The
  // sub-vectors are associated with the shufflevector instructions they will
  // replace.
  DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;

  for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
    // If we're generating more than one load, compute the base address of
    // subsequent loads as an offset from the previous.
    if (LoadCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(
          BaseAddr, VecTy->getVectorNumElements() * Factor);

    SmallVector<Value *, 2> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
    Ops.push_back(Builder.getInt32(LI->getAlignment()));

    CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

    // Replace uses of each shufflevector with the corresponding vector loaded
    // by ldN.
    for (unsigned i = 0; i < Shuffles.size(); i++) {
      ShuffleVectorInst *SV = Shuffles[i];
      unsigned Index = Indices[i];

      Value *SubVec = Builder.CreateExtractValue(VldN, Index);

      // Convert the integer vector to pointer vector if the element is pointer.
      if (EltTy->isPointerTy())
        SubVec = Builder.CreateIntToPtr(
            SubVec, VectorType::get(SV->getType()->getVectorElementType(),
                                    VecTy->getVectorNumElements()));

      SubVecs[SV].push_back(SubVec);
    }
  }

  // Replace uses of the shufflevector instructions with the sub-vectors
  // returned by the load intrinsic. If a shufflevector instruction is
  // associated with more than one sub-vector, those sub-vectors will be
  // concatenated into a single wide vector.
  for (ShuffleVectorInst *SVI : Shuffles) {
    auto &SubVec = SubVecs[SVI];
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
  }

  return true;
}

/// Lower an interleaved store into a vstN intrinsic.
///
/// E.g.
Lower an interleaved store (Factor = 3): 14780 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, 14781 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> 14782 /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 14783 /// 14784 /// Into: 14785 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> 14786 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> 14787 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> 14788 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) 14789 /// 14790 /// Note that the new shufflevectors will be removed and we'll only generate one 14791 /// vst3 instruction in CodeGen. 14792 /// 14793 /// Example for a more general valid mask (Factor 3). Lower: 14794 /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, 14795 /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> 14796 /// store <12 x i32> %i.vec, <12 x i32>* %ptr 14797 /// 14798 /// Into: 14799 /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> 14800 /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> 14801 /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> 14802 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) 14803 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, 14804 ShuffleVectorInst *SVI, 14805 unsigned Factor) const { 14806 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 14807 "Invalid interleave factor"); 14808 14809 VectorType *VecTy = SVI->getType(); 14810 assert(VecTy->getVectorNumElements() % Factor == 0 && 14811 "Invalid interleaved store"); 14812 14813 unsigned LaneLen = VecTy->getVectorNumElements() / Factor; 14814 Type *EltTy = VecTy->getVectorElementType(); 14815 VectorType *SubVecTy = VectorType::get(EltTy, LaneLen); 14816 14817 const DataLayout &DL = SI->getModule()->getDataLayout(); 14818 14819 // Skip if we do not have NEON and skip illegal vector types. 
We can 14820 // "legalize" wide vector types into multiple interleaved accesses as long as 14821 // the vector types are divisible by 128. 14822 if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL)) 14823 return false; 14824 14825 unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); 14826 14827 Value *Op0 = SVI->getOperand(0); 14828 Value *Op1 = SVI->getOperand(1); 14829 IRBuilder<> Builder(SI); 14830 14831 // StN intrinsics don't support pointer vectors as arguments. Convert pointer 14832 // vectors to integer vectors. 14833 if (EltTy->isPointerTy()) { 14834 Type *IntTy = DL.getIntPtrType(EltTy); 14835 14836 // Convert to the corresponding integer vector. 14837 Type *IntVecTy = 14838 VectorType::get(IntTy, Op0->getType()->getVectorNumElements()); 14839 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); 14840 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); 14841 14842 SubVecTy = VectorType::get(IntTy, LaneLen); 14843 } 14844 14845 // The base address of the store. 14846 Value *BaseAddr = SI->getPointerOperand(); 14847 14848 if (NumStores > 1) { 14849 // If we're going to generate more than one store, reset the lane length 14850 // and sub-vector type to something legal. 14851 LaneLen /= NumStores; 14852 SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen); 14853 14854 // We will compute the pointer operand of each store from the original base 14855 // address using GEPs. Cast the base address to a pointer to the scalar 14856 // element type. 
14857 BaseAddr = Builder.CreateBitCast( 14858 BaseAddr, SubVecTy->getVectorElementType()->getPointerTo( 14859 SI->getPointerAddressSpace())); 14860 } 14861 14862 assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!"); 14863 14864 auto Mask = SVI->getShuffleMask(); 14865 14866 Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); 14867 Type *Tys[] = {Int8Ptr, SubVecTy}; 14868 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, 14869 Intrinsic::arm_neon_vst3, 14870 Intrinsic::arm_neon_vst4}; 14871 14872 for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { 14873 // If we generating more than one store, we compute the base address of 14874 // subsequent stores as an offset from the previous. 14875 if (StoreCount > 0) 14876 BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor); 14877 14878 SmallVector<Value *, 6> Ops; 14879 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr)); 14880 14881 Function *VstNFunc = 14882 Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys); 14883 14884 // Split the shufflevector operands into sub vectors for the new vstN call. 14885 for (unsigned i = 0; i < Factor; i++) { 14886 unsigned IdxI = StoreCount * LaneLen * Factor + i; 14887 if (Mask[IdxI] >= 0) { 14888 Ops.push_back(Builder.CreateShuffleVector( 14889 Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0))); 14890 } else { 14891 unsigned StartMask = 0; 14892 for (unsigned j = 1; j < LaneLen; j++) { 14893 unsigned IdxJ = StoreCount * LaneLen * Factor + j; 14894 if (Mask[IdxJ * Factor + IdxI] >= 0) { 14895 StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ; 14896 break; 14897 } 14898 } 14899 // Note: If all elements in a chunk are undefs, StartMask=0! 14900 // Note: Filling undef gaps with random elements is ok, since 14901 // those elements were being written anyway (with undefs). 
14902 // In the case of all undefs we're defaulting to using elems from 0 14903 // Note: StartMask cannot be negative, it's checked in 14904 // isReInterleaveMask 14905 Ops.push_back(Builder.CreateShuffleVector( 14906 Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0))); 14907 } 14908 } 14909 14910 Ops.push_back(Builder.getInt32(SI->getAlignment())); 14911 Builder.CreateCall(VstNFunc, Ops); 14912 } 14913 return true; 14914 } 14915 14916 enum HABaseType { 14917 HA_UNKNOWN = 0, 14918 HA_FLOAT, 14919 HA_DOUBLE, 14920 HA_VECT64, 14921 HA_VECT128 14922 }; 14923 14924 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, 14925 uint64_t &Members) { 14926 if (auto *ST = dyn_cast<StructType>(Ty)) { 14927 for (unsigned i = 0; i < ST->getNumElements(); ++i) { 14928 uint64_t SubMembers = 0; 14929 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) 14930 return false; 14931 Members += SubMembers; 14932 } 14933 } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { 14934 uint64_t SubMembers = 0; 14935 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) 14936 return false; 14937 Members += SubMembers * AT->getNumElements(); 14938 } else if (Ty->isFloatTy()) { 14939 if (Base != HA_UNKNOWN && Base != HA_FLOAT) 14940 return false; 14941 Members = 1; 14942 Base = HA_FLOAT; 14943 } else if (Ty->isDoubleTy()) { 14944 if (Base != HA_UNKNOWN && Base != HA_DOUBLE) 14945 return false; 14946 Members = 1; 14947 Base = HA_DOUBLE; 14948 } else if (auto *VT = dyn_cast<VectorType>(Ty)) { 14949 Members = 1; 14950 switch (Base) { 14951 case HA_FLOAT: 14952 case HA_DOUBLE: 14953 return false; 14954 case HA_VECT64: 14955 return VT->getBitWidth() == 64; 14956 case HA_VECT128: 14957 return VT->getBitWidth() == 128; 14958 case HA_UNKNOWN: 14959 switch (VT->getBitWidth()) { 14960 case 64: 14961 Base = HA_VECT64; 14962 return true; 14963 case 128: 14964 Base = HA_VECT128; 14965 return true; 14966 default: 14967 return false; 14968 } 14969 } 14970 } 14971 
  // A homogeneous aggregate contains between one and four members.
  return (Members > 0 && Members <= 4);
}

/// Return the correct alignment for the current calling convention.
unsigned
ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
                                                 DataLayout DL) const {
  // NOTE(review): DL is taken by value; presumably this mirrors the
  // TargetLowering hook's signature -- confirm against the base class
  // before changing it to a reference.
  if (!ArgTy->isVectorTy())
    return DL.getABITypeAlignment(ArgTy);

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(DL.getABITypeAlignment(ArgTy), DL.getStackAlignment());
}

/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  // Consecutive registers are only required once the effective calling
  // convention (after resolving varargs) is AAPCS-VFP.
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  // Integer arrays ([N x iM]) also need consecutive registers, per the
  // function comment above.
  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

/// Register holding the exception pointer on entry to a landing pad, or
/// NoRegister when SjLj EH is in use.
unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

/// Register holding the exception selector on entry to a landing pad, or
/// NoRegister when SjLj EH is in use.
unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ?
ARM::NoRegister : ARM::R1;
}

/// Record in ARMFunctionInfo that this function uses split CSR
/// saving/restoring (callee-saved registers preserved via explicit copies).
void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

/// For each callee-saved register preserved via copy, emit a COPY into a
/// fresh virtual register at the start of \p Entry, and (in the loop body
/// continued below) a COPY back into the physical register before the
/// terminator of every block in \p Exits.
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return; // No CSRs-via-copy for this function; nothing to do.

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  // IStart is a null-terminated list of physical registers.
  for (const MCPhysReg *I = IStart; *I; ++I) {
    // Select the register class for the copy; only GPRs and DPRs are
    // expected here.
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

/// Precompute the maximum outgoing call-frame size for \p MF, then run the
/// base-class finalization.
void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}