//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
  "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(true));
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
               ParmContext PC)
        : CCState(CC, isVarArg, MF, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}
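
// Force the AAPCS calling convention onto the runtime helper calls that the
// backend may emit, regardless of the calling convention used for C code.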
void ARMTargetLowering::InitLibcallCallingConvs() {
  // The builtins on ARM always use AAPCS, irrespective of whether C is AAPCS
  // or AAPCS_VFP.
  for (const auto LC : {
           RTLIB::SHL_I16,
           RTLIB::SHL_I32,
           RTLIB::SHL_I64,
           RTLIB::SHL_I128,
           RTLIB::SRL_I16,
           RTLIB::SRL_I32,
           RTLIB::SRL_I64,
           RTLIB::SRL_I128,
           RTLIB::SRA_I16,
           RTLIB::SRA_I32,
           RTLIB::SRA_I64,
           RTLIB::SRA_I128,
           RTLIB::MUL_I8,
           RTLIB::MUL_I16,
           RTLIB::MUL_I32,
           RTLIB::MUL_I64,
           RTLIB::MUL_I128,
           RTLIB::MULO_I32,
           RTLIB::MULO_I64,
           RTLIB::MULO_I128,
           RTLIB::SDIV_I8,
           RTLIB::SDIV_I16,
           RTLIB::SDIV_I32,
           RTLIB::SDIV_I64,
           RTLIB::SDIV_I128,
           RTLIB::UDIV_I8,
           RTLIB::UDIV_I16,
           RTLIB::UDIV_I32,
           RTLIB::UDIV_I64,
           RTLIB::UDIV_I128,
           RTLIB::SREM_I8,
           RTLIB::SREM_I16,
           RTLIB::SREM_I32,
           RTLIB::SREM_I64,
           RTLIB::SREM_I128,
           RTLIB::UREM_I8,
           RTLIB::UREM_I16,
           RTLIB::UREM_I32,
           RTLIB::UREM_I64,
           RTLIB::UREM_I128,
           RTLIB::SDIVREM_I8,
           RTLIB::SDIVREM_I16,
           RTLIB::SDIVREM_I32,
           RTLIB::SDIVREM_I64,
           RTLIB::SDIVREM_I128,
           RTLIB::UDIVREM_I8,
           RTLIB::UDIVREM_I16,
           RTLIB::UDIVREM_I32,
           RTLIB::UDIVREM_I64,
           RTLIB::UDIVREM_I128,
           RTLIB::NEG_I32,
           RTLIB::NEG_I64,
           RTLIB::ADD_F32,
           RTLIB::ADD_F64,
           RTLIB::ADD_F80,
           RTLIB::ADD_F128,
           RTLIB::SUB_F32,
           RTLIB::SUB_F64,
           RTLIB::SUB_F80,
           RTLIB::SUB_F128,
           RTLIB::MUL_F32,
           RTLIB::MUL_F64,
           RTLIB::MUL_F80,
           RTLIB::MUL_F128,
           RTLIB::DIV_F32,
           RTLIB::DIV_F64,
           RTLIB::DIV_F80,
           RTLIB::DIV_F128,
           RTLIB::POWI_F32,
           RTLIB::POWI_F64,
           RTLIB::POWI_F80,
           RTLIB::POWI_F128,
           RTLIB::FPEXT_F64_F128,
           RTLIB::FPEXT_F32_F128,
           RTLIB::FPEXT_F32_F64,
           RTLIB::FPEXT_F16_F32,
           RTLIB::FPROUND_F32_F16,
           RTLIB::FPROUND_F64_F16,
           RTLIB::FPROUND_F80_F16,
           RTLIB::FPROUND_F128_F16,
           RTLIB::FPROUND_F64_F32,
           RTLIB::FPROUND_F80_F32,
           RTLIB::FPROUND_F128_F32,
           RTLIB::FPROUND_F80_F64,
           RTLIB::FPROUND_F128_F64,
           RTLIB::FPTOSINT_F32_I32,
           RTLIB::FPTOSINT_F32_I64,
           RTLIB::FPTOSINT_F32_I128,
           RTLIB::FPTOSINT_F64_I32,
           RTLIB::FPTOSINT_F64_I64,
           RTLIB::FPTOSINT_F64_I128,
           RTLIB::FPTOSINT_F80_I32,
           RTLIB::FPTOSINT_F80_I64,
           RTLIB::FPTOSINT_F80_I128,
           RTLIB::FPTOSINT_F128_I32,
           RTLIB::FPTOSINT_F128_I64,
           RTLIB::FPTOSINT_F128_I128,
           RTLIB::FPTOUINT_F32_I32,
           RTLIB::FPTOUINT_F32_I64,
           RTLIB::FPTOUINT_F32_I128,
           RTLIB::FPTOUINT_F64_I32,
           RTLIB::FPTOUINT_F64_I64,
           RTLIB::FPTOUINT_F64_I128,
           RTLIB::FPTOUINT_F80_I32,
           RTLIB::FPTOUINT_F80_I64,
           RTLIB::FPTOUINT_F80_I128,
           RTLIB::FPTOUINT_F128_I32,
           RTLIB::FPTOUINT_F128_I64,
           RTLIB::FPTOUINT_F128_I128,
           RTLIB::SINTTOFP_I32_F32,
           RTLIB::SINTTOFP_I32_F64,
           RTLIB::SINTTOFP_I32_F80,
           RTLIB::SINTTOFP_I32_F128,
           RTLIB::SINTTOFP_I64_F32,
           RTLIB::SINTTOFP_I64_F64,
           RTLIB::SINTTOFP_I64_F80,
           RTLIB::SINTTOFP_I64_F128,
           RTLIB::SINTTOFP_I128_F32,
           RTLIB::SINTTOFP_I128_F64,
           RTLIB::SINTTOFP_I128_F80,
           RTLIB::SINTTOFP_I128_F128,
           RTLIB::UINTTOFP_I32_F32,
           RTLIB::UINTTOFP_I32_F64,
           RTLIB::UINTTOFP_I32_F80,
           RTLIB::UINTTOFP_I32_F128,
           RTLIB::UINTTOFP_I64_F32,
           RTLIB::UINTTOFP_I64_F64,
           RTLIB::UINTTOFP_I64_F80,
           RTLIB::UINTTOFP_I64_F128,
           RTLIB::UINTTOFP_I128_F32,
           RTLIB::UINTTOFP_I128_F64,
           RTLIB::UINTTOFP_I128_F80,
           RTLIB::UINTTOFP_I128_F128,
           RTLIB::OEQ_F32,
           RTLIB::OEQ_F64,
           RTLIB::OEQ_F128,
           RTLIB::UNE_F32,
           RTLIB::UNE_F64,
           RTLIB::UNE_F128,
           RTLIB::OGE_F32,
           RTLIB::OGE_F64,
           RTLIB::OGE_F128,
           RTLIB::OLT_F32,
           RTLIB::OLT_F64,
           RTLIB::OLT_F128,
           RTLIB::OLE_F32,
           RTLIB::OLE_F64,
           RTLIB::OLE_F128,
           RTLIB::OGT_F32,
           RTLIB::OGT_F64,
           RTLIB::OGT_F128,
           RTLIB::UO_F32,
           RTLIB::UO_F64,
           RTLIB::UO_F128,
           RTLIB::O_F32,
           RTLIB::O_F64,
           RTLIB::O_F128,
       })
    setLibcallCallingConv(LC, CallingConv::ARM_AAPCS);
}

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
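
// Register the operation actions common to every NEON vector type: loads,
// stores and bitwise ops are promoted to the given canonical types, and
// operations NEON cannot perform natively are expanded or custom-lowered.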
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}
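
// The constructor configures everything the SelectionDAG needs to know about
// the target: register classes for the legal types, per-operation legality
// actions, libcall names and calling conventions, and DAG-combine hooks.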
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  InitLibcallCallingConvs();

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },
        { RTLIB::O_F32,   "__unordsf2vfp", ISD::SETEQ },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },
        { RTLIB::O_F64,   "__unorddf2vfp", ISD::SETEQ },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating
        // VFP instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating
        // VFP instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }

    // Set the correct calling convention for ARMv7k WatchOS. It's just
    // AAPCS_VFP for functions as simple as libcalls.
    if (Subtarget->isTargetWatchABI()) {
      for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
        setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F64,   "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F32,   "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8,  "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8,  "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetWatchOS() ||
      (Subtarget->isTargetIOS() &&
       !Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-WatchOS platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    setLibcallName(RTLIB::FPROUND_F32_F16, "__aeabi_f2h");
    setLibcallName(RTLIB::FPROUND_F64_F16, "__aeabi_d2h");
    setLibcallName(RTLIB::FPEXT_F16_F32, "__aeabi_h2f");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
  }

  for (MVT VT : MVT::vector_valuetypes()) {
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
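
  // NEON-specific configuration: register the vector types NEON provides and
  // mark the operations it cannot perform natively for expansion or custom
  // lowering.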
  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it. The same
    // applies to v4f32, although vadd, vsub and vmul are natively supported
    // for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a test case in which "copysign" appears in a DAG
    // with vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have a single-instruction SINT_TO_FP or UINT_TO_FP with a
    // destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Expand);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Expand);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);
  if (Subtarget->isFPOnlySP()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }
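
  // Overflow-checking add/sub are custom-lowered so the overflow result can be
  // read directly from the flags set by the ARM add/subtract instructions.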
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the CPU doesn't have a HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, LibCall);
    setOperationAction(ISD::UDIV, MVT::i32, LibCall);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
    HasStandaloneRem = false;

    setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");

    setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }
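
  // Addresses of globals, constant pool entries, TLS variables and block
  // addresses all need target-specific lowering (literal pools or movw/movt
  // sequences).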
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  InsertFencesForAtomic = false;
  if (Subtarget->hasAnyDataBarrier() &&
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
      setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      InsertFencesForAtomic = true;
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (Subtarget->useSjLjEH())
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
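
  // Comparisons and selects on i32/f32/f64 are lowered to ARM conditional
  // moves and compare-and-branch sequences rather than generic SETCC nodes.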
  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // Thumb-1 cannot currently select ARMISD::SUBE.
  if (!Subtarget->isThumb1Only())
    setOperationAction(ISD::SETCCE, MVT::i32, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
    // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
    if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    }

    // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    }
  }

  // Combine sin / cos into one node or libcall if possible.
  if (Subtarget->hasSinCos()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    if (Subtarget->isTargetWatchABI()) {
      setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
      setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
    }
    if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) {
      // For iOS, we don't want the normal expansion of a libcall to sincos.
      // We want to issue a libcall to __sincos_stret.
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
    }
  }

  // FP-ARMv8 implements a lot of rounding-like FP operations.
  if (Subtarget->hasFPARMv8()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    if (!Subtarget->isFPOnlySP()) {
      setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    }
  }

  if (Subtarget->hasNEON()) {
    // vmin and vmax aren't available in a scalar form, so we use
    // a NEON instruction with an undef lane instead.
    setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal);
    setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}

// FIXME: It might make sense to define the representative register class as
// the nearest super-register that has a non-null superset. For example,
// DPR_VFP2 is a super-register of SPR, and DPR is a superset of DPR_VFP2.
// Consequently, SPR's representative would be DPR_VFP2. This should work well
// if register pressure tracking were modified such that a register use would
// increment the pressure of the register class's representative and all of its
// super-classes' representatives transitively. We have not implemented this
// because of the difficulty prior to coalescing of modeling operand register
// classes due to the common occurrence of cross class copies and subregister
// insertions and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  case ARMISD::FIRST_NUMBER: break;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::SSAT: return "ARMISD::SSAT";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VSLI: return "ARMISD::VSLI";
  case ARMISD::VSRI: return "ARMISD::VSRI";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::UMAAL: return "ARMISD::UMAAL";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  }
  return nullptr;
}
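
/// Return the value type to use for ISD::SETCC: the pointer-sized integer for
/// scalar comparisons, and a vector of integers of matching width for vector
/// comparisons.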
return "ARMISD::VSHRs"; 1374 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 1375 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 1376 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 1377 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 1378 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 1379 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 1380 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 1381 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 1382 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 1383 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 1384 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 1385 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 1386 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 1387 case ARMISD::VSLI: return "ARMISD::VSLI"; 1388 case ARMISD::VSRI: return "ARMISD::VSRI"; 1389 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 1390 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 1391 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 1392 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 1393 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; 1394 case ARMISD::VDUP: return "ARMISD::VDUP"; 1395 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 1396 case ARMISD::VEXT: return "ARMISD::VEXT"; 1397 case ARMISD::VREV64: return "ARMISD::VREV64"; 1398 case ARMISD::VREV32: return "ARMISD::VREV32"; 1399 case ARMISD::VREV16: return "ARMISD::VREV16"; 1400 case ARMISD::VZIP: return "ARMISD::VZIP"; 1401 case ARMISD::VUZP: return "ARMISD::VUZP"; 1402 case ARMISD::VTRN: return "ARMISD::VTRN"; 1403 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 1404 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 1405 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 1406 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 1407 case ARMISD::UMAAL: return "ARMISD::UMAAL"; 1408 case ARMISD::UMLAL: return "ARMISD::UMLAL"; 1409 case ARMISD::SMLAL: return "ARMISD::SMLAL"; 1410 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 1411 case ARMISD::BFI: return "ARMISD::BFI"; 1412 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 1413 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 1414 case ARMISD::VBSL: return "ARMISD::VBSL"; 1415 case ARMISD::MEMCPY: return "ARMISD::MEMCPY"; 1416 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 1417 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 1418 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 1419 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 1420 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 1421 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 1422 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 1423 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 1424 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 1425 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 1426 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 1427 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 1428 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 1429 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 1430 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 1431 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 1432 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 1433 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 1434 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 1435 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 1436 } 1437 return nullptr; 1438 } 1439 1440 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &, 1441 EVT VT) const { 1442 if (!VT.isVector()) 1443 return getPointerTy(DL); 1444 return 
VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// memcpy and other memory intrinsics typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                               unsigned &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))
    return false;
  MinSize = 8;
  // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
  // cycle faster than 4-byte aligned LDM.
  PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
  return true;
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
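/// Unordered comparisons may need a second condition code: for example,
/// SETUEQ maps to (EQ, VS), i.e. "equal or unordered", so callers typically
/// emit a second predicated instruction whenever CondCode2 is not ARMCC::AL.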
1535 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 1536 ARMCC::CondCodes &CondCode2) { 1537 CondCode2 = ARMCC::AL; 1538 switch (CC) { 1539 default: llvm_unreachable("Unknown FP condition!"); 1540 case ISD::SETEQ: 1541 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 1542 case ISD::SETGT: 1543 case ISD::SETOGT: CondCode = ARMCC::GT; break; 1544 case ISD::SETGE: 1545 case ISD::SETOGE: CondCode = ARMCC::GE; break; 1546 case ISD::SETOLT: CondCode = ARMCC::MI; break; 1547 case ISD::SETOLE: CondCode = ARMCC::LS; break; 1548 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 1549 case ISD::SETO: CondCode = ARMCC::VC; break; 1550 case ISD::SETUO: CondCode = ARMCC::VS; break; 1551 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 1552 case ISD::SETUGT: CondCode = ARMCC::HI; break; 1553 case ISD::SETUGE: CondCode = ARMCC::PL; break; 1554 case ISD::SETLT: 1555 case ISD::SETULT: CondCode = ARMCC::LT; break; 1556 case ISD::SETLE: 1557 case ISD::SETULE: CondCode = ARMCC::LE; break; 1558 case ISD::SETNE: 1559 case ISD::SETUNE: CondCode = ARMCC::NE; break; 1560 } 1561 } 1562 1563 //===----------------------------------------------------------------------===// 1564 // Calling Convention Implementation 1565 //===----------------------------------------------------------------------===// 1566 1567 #include "ARMGenCallingConv.inc" 1568 1569 /// getEffectiveCallingConv - Get the effective calling convention, taking into 1570 /// account presence of floating point hardware and calling convention 1571 /// limitations, such as support for variadic functions. 1572 CallingConv::ID 1573 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, 1574 bool isVarArg) const { 1575 switch (CC) { 1576 default: 1577 llvm_unreachable("Unsupported calling convention"); 1578 case CallingConv::ARM_AAPCS: 1579 case CallingConv::ARM_APCS: 1580 case CallingConv::GHC: 1581 return CC; 1582 case CallingConv::PreserveMost: 1583 return CallingConv::PreserveMost; 1584 case CallingConv::ARM_AAPCS_VFP: 1585 case CallingConv::Swift: 1586 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP; 1587 case CallingConv::C: 1588 if (!Subtarget->isAAPCS_ABI()) 1589 return CallingConv::ARM_APCS; 1590 else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && 1591 getTargetMachine().Options.FloatABIType == FloatABI::Hard && 1592 !isVarArg) 1593 return CallingConv::ARM_AAPCS_VFP; 1594 else 1595 return CallingConv::ARM_AAPCS; 1596 case CallingConv::Fast: 1597 case CallingConv::CXX_FAST_TLS: 1598 if (!Subtarget->isAAPCS_ABI()) { 1599 if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) 1600 return CallingConv::Fast; 1601 return CallingConv::ARM_APCS; 1602 } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) 1603 return CallingConv::ARM_AAPCS_VFP; 1604 else 1605 return CallingConv::ARM_AAPCS; 1606 } 1607 } 1608 1609 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given 1610 /// CallingConvention. 1611 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC, 1612 bool Return, 1613 bool isVarArg) const { 1614 switch (getEffectiveCallingConv(CC, isVarArg)) { 1615 default: 1616 llvm_unreachable("Unsupported calling convention"); 1617 case CallingConv::ARM_APCS: 1618 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS); 1619 case CallingConv::ARM_AAPCS: 1620 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1621 case CallingConv::ARM_AAPCS_VFP: 1622 return (Return ? 
RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP); 1623 case CallingConv::Fast: 1624 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); 1625 case CallingConv::GHC: 1626 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC); 1627 case CallingConv::PreserveMost: 1628 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS); 1629 } 1630 } 1631 1632 /// LowerCallResult - Lower the result values of a call into the 1633 /// appropriate copies out of appropriate physical registers. 1634 SDValue ARMTargetLowering::LowerCallResult( 1635 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, 1636 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 1637 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn, 1638 SDValue ThisVal) const { 1639 1640 // Assign locations to each value returned by this call. 1641 SmallVector<CCValAssign, 16> RVLocs; 1642 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 1643 *DAG.getContext(), Call); 1644 CCInfo.AnalyzeCallResult(Ins, 1645 CCAssignFnForNode(CallConv, /* Return*/ true, 1646 isVarArg)); 1647 1648 // Copy all of the result registers out of their specified physreg. 1649 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1650 CCValAssign VA = RVLocs[i]; 1651 1652 // Pass 'this' value directly from the argument to return value, to avoid 1653 // reg unit interference 1654 if (i == 0 && isThisReturn) { 1655 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 && 1656 "unexpected return calling convention register assignment"); 1657 InVals.push_back(ThisVal); 1658 continue; 1659 } 1660 1661 SDValue Val; 1662 if (VA.needsCustom()) { 1663 // Handle f64 or half of a v2f64. 1664 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1665 InFlag); 1666 Chain = Lo.getValue(1); 1667 InFlag = Lo.getValue(2); 1668 VA = RVLocs[++i]; // skip ahead to next loc 1669 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, 1670 InFlag); 1671 Chain = Hi.getValue(1); 1672 InFlag = Hi.getValue(2); 1673 if (!Subtarget->isLittle()) 1674 std::swap (Lo, Hi); 1675 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1676 1677 if (VA.getLocVT() == MVT::v2f64) { 1678 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 1679 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1680 DAG.getConstant(0, dl, MVT::i32)); 1681 1682 VA = RVLocs[++i]; // skip ahead to next loc 1683 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1684 Chain = Lo.getValue(1); 1685 InFlag = Lo.getValue(2); 1686 VA = RVLocs[++i]; // skip ahead to next loc 1687 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag); 1688 Chain = Hi.getValue(1); 1689 InFlag = Hi.getValue(2); 1690 if (!Subtarget->isLittle()) 1691 std::swap (Lo, Hi); 1692 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 1693 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val, 1694 DAG.getConstant(1, dl, MVT::i32)); 1695 } 1696 } else { 1697 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1698 InFlag); 1699 Chain = Val.getValue(1); 1700 InFlag = Val.getValue(2); 1701 } 1702 1703 switch (VA.getLocInfo()) { 1704 default: llvm_unreachable("Unknown loc info!"); 1705 case CCValAssign::Full: break; 1706 case CCValAssign::BCvt: 1707 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1708 break; 1709 } 1710 1711 InVals.push_back(Val); 1712 } 1713 1714 return Chain; 1715 } 1716 1717 /// LowerMemOpCallTo - Store the argument to the stack. 
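/// The destination address is computed as SP plus the argument's LocMemOffset,
/// matching the stack layout produced by the calling-convention analysis.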
1718 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, 1719 SDValue Arg, const SDLoc &dl, 1720 SelectionDAG &DAG, 1721 const CCValAssign &VA, 1722 ISD::ArgFlagsTy Flags) const { 1723 unsigned LocMemOffset = VA.getLocMemOffset(); 1724 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 1725 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 1726 StackPtr, PtrOff); 1727 return DAG.getStore( 1728 Chain, dl, Arg, PtrOff, 1729 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); 1730 } 1731 1732 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, 1733 SDValue Chain, SDValue &Arg, 1734 RegsToPassVector &RegsToPass, 1735 CCValAssign &VA, CCValAssign &NextVA, 1736 SDValue &StackPtr, 1737 SmallVectorImpl<SDValue> &MemOpChains, 1738 ISD::ArgFlagsTy Flags) const { 1739 1740 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1741 DAG.getVTList(MVT::i32, MVT::i32), Arg); 1742 unsigned id = Subtarget->isLittle() ? 0 : 1; 1743 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id))); 1744 1745 if (NextVA.isRegLoc()) 1746 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id))); 1747 else { 1748 assert(NextVA.isMemLoc()); 1749 if (!StackPtr.getNode()) 1750 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, 1751 getPointerTy(DAG.getDataLayout())); 1752 1753 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id), 1754 dl, DAG, NextVA, 1755 Flags)); 1756 } 1757 } 1758 1759 /// LowerCall - Lowering a call into a callseq_start <- 1760 /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter 1761 /// nodes. 1762 SDValue 1763 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 1764 SmallVectorImpl<SDValue> &InVals) const { 1765 SelectionDAG &DAG = CLI.DAG; 1766 SDLoc &dl = CLI.DL; 1767 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 1768 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 1769 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 1770 SDValue Chain = CLI.Chain; 1771 SDValue Callee = CLI.Callee; 1772 bool &isTailCall = CLI.IsTailCall; 1773 CallingConv::ID CallConv = CLI.CallConv; 1774 bool doesNotRet = CLI.DoesNotReturn; 1775 bool isVarArg = CLI.IsVarArg; 1776 1777 MachineFunction &MF = DAG.getMachineFunction(); 1778 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); 1779 bool isThisReturn = false; 1780 bool isSibCall = false; 1781 auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls"); 1782 1783 // Disable tail calls if they're not supported. 1784 if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true") 1785 isTailCall = false; 1786 1787 if (isTailCall) { 1788 // Check if it's really possible to do a tail call. 1789 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1790 isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(), 1791 Outs, OutVals, Ins, DAG); 1792 if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall()) 1793 report_fatal_error("failed to perform tail call elimination on a call " 1794 "site marked musttail"); 1795 // We don't support GuaranteedTailCallOpt for ARM, only automatically 1796 // detected sibcalls. 1797 if (isTailCall) { 1798 ++NumTailCalls; 1799 isSibCall = true; 1800 } 1801 } 1802 1803 // Analyze operands of the call, assigning locations to each operand. 
1804 SmallVector<CCValAssign, 16> ArgLocs; 1805 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 1806 *DAG.getContext(), Call); 1807 CCInfo.AnalyzeCallOperands(Outs, 1808 CCAssignFnForNode(CallConv, /* Return*/ false, 1809 isVarArg)); 1810 1811 // Get a count of how many bytes are to be pushed on the stack. 1812 unsigned NumBytes = CCInfo.getNextStackOffset(); 1813 1814 // For tail calls, memory operands are available in our caller's stack. 1815 if (isSibCall) 1816 NumBytes = 0; 1817 1818 // Adjust the stack pointer for the new arguments... 1819 // These operations are automatically eliminated by the prolog/epilog pass 1820 if (!isSibCall) 1821 Chain = DAG.getCALLSEQ_START(Chain, 1822 DAG.getIntPtrConstant(NumBytes, dl, true), dl); 1823 1824 SDValue StackPtr = 1825 DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout())); 1826 1827 RegsToPassVector RegsToPass; 1828 SmallVector<SDValue, 8> MemOpChains; 1829 1830 // Walk the register/memloc assignments, inserting copies/loads. In the case 1831 // of tail call optimization, arguments are handled later. 1832 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1833 i != e; 1834 ++i, ++realArgIdx) { 1835 CCValAssign &VA = ArgLocs[i]; 1836 SDValue Arg = OutVals[realArgIdx]; 1837 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1838 bool isByVal = Flags.isByVal(); 1839 1840 // Promote the value if needed. 1841 switch (VA.getLocInfo()) { 1842 default: llvm_unreachable("Unknown loc info!"); 1843 case CCValAssign::Full: break; 1844 case CCValAssign::SExt: 1845 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1846 break; 1847 case CCValAssign::ZExt: 1848 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1849 break; 1850 case CCValAssign::AExt: 1851 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1852 break; 1853 case CCValAssign::BCvt: 1854 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1855 break; 1856 } 1857 1858 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1859 if (VA.needsCustom()) { 1860 if (VA.getLocVT() == MVT::v2f64) { 1861 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1862 DAG.getConstant(0, dl, MVT::i32)); 1863 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1864 DAG.getConstant(1, dl, MVT::i32)); 1865 1866 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1867 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1868 1869 VA = ArgLocs[++i]; // skip ahead to next loc 1870 if (VA.isRegLoc()) { 1871 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1872 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1873 } else { 1874 assert(VA.isMemLoc()); 1875 1876 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1877 dl, DAG, VA, Flags)); 1878 } 1879 } else { 1880 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1881 StackPtr, MemOpChains, Flags); 1882 } 1883 } else if (VA.isRegLoc()) { 1884 if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) { 1885 assert(VA.getLocVT() == MVT::i32 && 1886 "unexpected calling convention register assignment"); 1887 assert(!Ins.empty() && Ins[0].VT == MVT::i32 && 1888 "unexpected use of 'returned'"); 1889 isThisReturn = true; 1890 } 1891 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1892 } else if (isByVal) { 1893 assert(VA.isMemLoc()); 1894 unsigned offset = 0; 1895 1896 // True if this byval aggregate will be split between registers 1897 // and memory. 
1898 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount(); 1899 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed(); 1900 1901 if (CurByValIdx < ByValArgsCount) { 1902 1903 unsigned RegBegin, RegEnd; 1904 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd); 1905 1906 EVT PtrVT = 1907 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 1908 unsigned int i, j; 1909 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) { 1910 SDValue Const = DAG.getConstant(4*i, dl, MVT::i32); 1911 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 1912 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 1913 MachinePointerInfo(), 1914 DAG.InferPtrAlignment(AddArg)); 1915 MemOpChains.push_back(Load.getValue(1)); 1916 RegsToPass.push_back(std::make_pair(j, Load)); 1917 } 1918 1919 // If parameter size outsides register area, "offset" value 1920 // helps us to calculate stack slot for remained part properly. 1921 offset = RegEnd - RegBegin; 1922 1923 CCInfo.nextInRegsParam(); 1924 } 1925 1926 if (Flags.getByValSize() > 4*offset) { 1927 auto PtrVT = getPointerTy(DAG.getDataLayout()); 1928 unsigned LocMemOffset = VA.getLocMemOffset(); 1929 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 1930 SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff); 1931 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl); 1932 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset); 1933 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl, 1934 MVT::i32); 1935 SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl, 1936 MVT::i32); 1937 1938 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 1939 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode}; 1940 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs, 1941 Ops)); 1942 } 1943 } else if (!isSibCall) { 1944 assert(VA.isMemLoc()); 1945 1946 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 1947 dl, DAG, VA, Flags)); 1948 } 1949 } 1950 1951 if (!MemOpChains.empty()) 1952 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 1953 1954 // Build a sequence of copy-to-reg nodes chained together with token chain 1955 // and flag operands which copy the outgoing args into the appropriate regs. 1956 SDValue InFlag; 1957 // Tail call byval lowering might overwrite argument registers so in case of 1958 // tail call optimization the copies to registers are lowered later. 1959 if (!isTailCall) 1960 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1961 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1962 RegsToPass[i].second, InFlag); 1963 InFlag = Chain.getValue(1); 1964 } 1965 1966 // For tail calls lower the arguments to the 'real' stack slot. 1967 if (isTailCall) { 1968 // Force all the incoming stack arguments to be loaded from the stack 1969 // before any new outgoing arguments are stored to the stack, because the 1970 // outgoing stack slots may alias the incoming argument stack slots, and 1971 // the alias isn't otherwise explicit. This is slightly more conservative 1972 // than necessary, because it means that each store effectively depends 1973 // on every argument instead of just those arguments it would clobber. 1974 1975 // Do not flag preceding copytoreg stuff together with the following stuff. 
1976 InFlag = SDValue(); 1977 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1978 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1979 RegsToPass[i].second, InFlag); 1980 InFlag = Chain.getValue(1); 1981 } 1982 InFlag = SDValue(); 1983 } 1984 1985 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1986 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1987 // node so that legalize doesn't hack it. 1988 bool isDirect = false; 1989 1990 const TargetMachine &TM = getTargetMachine(); 1991 const Module *Mod = MF.getFunction()->getParent(); 1992 const GlobalValue *GV = nullptr; 1993 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 1994 GV = G->getGlobal(); 1995 bool isStub = 1996 !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); 1997 1998 bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass()); 1999 bool isLocalARMFunc = false; 2000 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2001 auto PtrVt = getPointerTy(DAG.getDataLayout()); 2002 2003 if (Subtarget->genLongCalls()) { 2004 assert((!isPositionIndependent() || Subtarget->isTargetWindows()) && 2005 "long-calls codegen is not position independent!"); 2006 // Handle a global address or an external symbol. If it's not one of 2007 // those, the target's already in a register, so we don't need to do 2008 // anything extra. 2009 if (isa<GlobalAddressSDNode>(Callee)) { 2010 // Create a constant pool entry for the callee address 2011 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2012 ARMConstantPoolValue *CPV = 2013 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 2014 2015 // Get the address of the callee into a register 2016 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); 2017 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2018 Callee = DAG.getLoad( 2019 PtrVt, dl, DAG.getEntryNode(), CPAddr, 2020 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2021 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 2022 const char *Sym = S->getSymbol(); 2023 2024 // Create a constant pool entry for the callee address 2025 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2026 ARMConstantPoolValue *CPV = 2027 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 2028 ARMPCLabelIndex, 0); 2029 // Get the address of the callee into a register 2030 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); 2031 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2032 Callee = DAG.getLoad( 2033 PtrVt, dl, DAG.getEntryNode(), CPAddr, 2034 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2035 } 2036 } else if (isa<GlobalAddressSDNode>(Callee)) { 2037 // If we're optimizing for minimum size and the function is called three or 2038 // more times in this block, we can improve codesize by calling indirectly 2039 // as BLXr has a 16-bit encoding. 2040 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal(); 2041 auto *BB = CLI.CS->getParent(); 2042 bool PreferIndirect = 2043 Subtarget->isThumb() && MF.getFunction()->optForMinSize() && 2044 count_if(GV->users(), [&BB](const User *U) { 2045 return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB; 2046 }) > 2; 2047 2048 if (!PreferIndirect) { 2049 isDirect = true; 2050 bool isDef = GV->isStrongDefinitionForLinker(); 2051 2052 // ARM call to a local ARM function is predicable. 
2053 isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking); 2054 // tBX takes a register source operand. 2055 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 2056 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); 2057 Callee = DAG.getNode( 2058 ARMISD::WrapperPIC, dl, PtrVt, 2059 DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY)); 2060 Callee = DAG.getLoad( 2061 PtrVt, dl, DAG.getEntryNode(), Callee, 2062 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 2063 /* Alignment = */ 0, MachineMemOperand::MODereferenceable | 2064 MachineMemOperand::MOInvariant); 2065 } else if (Subtarget->isTargetCOFF()) { 2066 assert(Subtarget->isTargetWindows() && 2067 "Windows is the only supported COFF target"); 2068 unsigned TargetFlags = GV->hasDLLImportStorageClass() 2069 ? ARMII::MO_DLLIMPORT 2070 : ARMII::MO_NO_FLAG; 2071 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0, 2072 TargetFlags); 2073 if (GV->hasDLLImportStorageClass()) 2074 Callee = 2075 DAG.getLoad(PtrVt, dl, DAG.getEntryNode(), 2076 DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee), 2077 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 2078 } else { 2079 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); 2080 } 2081 } 2082 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2083 isDirect = true; 2084 // tBX takes a register source operand. 2085 const char *Sym = S->getSymbol(); 2086 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 2087 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2088 ARMConstantPoolValue *CPV = 2089 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 2090 ARMPCLabelIndex, 4); 2091 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4); 2092 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2093 Callee = DAG.getLoad( 2094 PtrVt, dl, DAG.getEntryNode(), CPAddr, 2095 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2096 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2097 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel); 2098 } else { 2099 Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0); 2100 } 2101 } 2102 2103 // FIXME: handle tail calls differently. 2104 unsigned CallOpc; 2105 if (Subtarget->isThumb()) { 2106 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 2107 CallOpc = ARMISD::CALL_NOLINK; 2108 else 2109 CallOpc = ARMISD::CALL; 2110 } else { 2111 if (!isDirect && !Subtarget->hasV5TOps()) 2112 CallOpc = ARMISD::CALL_NOLINK; 2113 else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && 2114 // Emit regular call when code size is the priority 2115 !MF.getFunction()->optForMinSize()) 2116 // "mov lr, pc; b _foo" to avoid confusing the RSP 2117 CallOpc = ARMISD::CALL_NOLINK; 2118 else 2119 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; 2120 } 2121 2122 std::vector<SDValue> Ops; 2123 Ops.push_back(Chain); 2124 Ops.push_back(Callee); 2125 2126 // Add argument registers to the end of the list so that they are known live 2127 // into the call. 2128 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2129 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2130 RegsToPass[i].second.getValueType())); 2131 2132 // Add a register mask operand representing the call-preserved registers. 
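  // The mask tells later passes which registers are preserved across the call,
  // so the clobbered registers do not have to be listed individually.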
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    unsigned Align) const {
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");

  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Align = std::max(Align, 4U);

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  unsigned AlignInRegs = Align / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and the parameter size is greater than the
  // size of all remaining GPR registers. In that case we can't split the
  // parameter, we must send it all to the stack. We also must set the NCRN
  // to R4, so we waste all the remaining registers.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // The first register for the byval parameter is the first register that
  // wasn't allocated before this method call, i.e. "Reg". If the parameter is
  // small enough to fit in the range [Reg, r4), then the end (one past the
  // last) register is Reg + param-size-in-regs; otherwise the parameter is
  // split between registers and the stack, and the end register is r4.
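  // For example (a sketch, assuming r0 is already allocated, the byval is
  // 16 bytes with 8-byte alignment, and nothing has gone to the stack yet):
  // r1 is wasted for alignment, the byval occupies r2-r3, and the remaining
  // 8 bytes are passed in memory, so Size is reduced to 8 below.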
2222 unsigned ByValRegBegin = Reg; 2223 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4); 2224 State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd); 2225 // Note, first register is allocated in the beginning of function already, 2226 // allocate remained amount of registers we need. 2227 for (unsigned i = Reg + 1; i != ByValRegEnd; ++i) 2228 State->AllocateReg(GPRArgRegs); 2229 // A byval parameter that is split between registers and memory needs its 2230 // size truncated here. 2231 // In the case where the entire structure fits in registers, we set the 2232 // size in memory to zero. 2233 Size = std::max<int>(Size - Excess, 0); 2234 } 2235 2236 /// MatchingStackOffset - Return true if the given stack call argument is 2237 /// already available in the same position (relatively) of the caller's 2238 /// incoming argument stack. 2239 static 2240 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2241 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, 2242 const TargetInstrInfo *TII) { 2243 unsigned Bytes = Arg.getValueSizeInBits() / 8; 2244 int FI = INT_MAX; 2245 if (Arg.getOpcode() == ISD::CopyFromReg) { 2246 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2247 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2248 return false; 2249 MachineInstr *Def = MRI->getVRegDef(VR); 2250 if (!Def) 2251 return false; 2252 if (!Flags.isByVal()) { 2253 if (!TII->isLoadFromStackSlot(*Def, FI)) 2254 return false; 2255 } else { 2256 return false; 2257 } 2258 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2259 if (Flags.isByVal()) 2260 // ByVal argument is passed in as a pointer but it's now being 2261 // dereferenced. e.g. 2262 // define @foo(%struct.X* %A) { 2263 // tail call @bar(%struct.X* byval %A) 2264 // } 2265 return false; 2266 SDValue Ptr = Ld->getBasePtr(); 2267 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2268 if (!FINode) 2269 return false; 2270 FI = FINode->getIndex(); 2271 } else 2272 return false; 2273 2274 assert(FI != INT_MAX); 2275 if (!MFI.isFixedObjectIndex(FI)) 2276 return false; 2277 return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI); 2278 } 2279 2280 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2281 /// for tail call optimization. Targets which want to do tail call 2282 /// optimization should implement this function. 2283 bool 2284 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2285 CallingConv::ID CalleeCC, 2286 bool isVarArg, 2287 bool isCalleeStructRet, 2288 bool isCallerStructRet, 2289 const SmallVectorImpl<ISD::OutputArg> &Outs, 2290 const SmallVectorImpl<SDValue> &OutVals, 2291 const SmallVectorImpl<ISD::InputArg> &Ins, 2292 SelectionDAG& DAG) const { 2293 MachineFunction &MF = DAG.getMachineFunction(); 2294 const Function *CallerF = MF.getFunction(); 2295 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2296 2297 assert(Subtarget->supportsTailCall()); 2298 2299 // Look for obvious safe cases to perform tail call optimization that do not 2300 // require ABI changes. This is what gcc calls sibcall. 2301 2302 // Do not sibcall optimize vararg calls unless the call site is not passing 2303 // any arguments. 2304 if (isVarArg && !Outs.empty()) 2305 return false; 2306 2307 // Exception-handling functions need a special set of instructions to indicate 2308 // a return to the hardware. Tail-calling another function would probably 2309 // break this. 
2310 if (CallerF->hasFnAttribute("interrupt")) 2311 return false; 2312 2313 // Also avoid sibcall optimization if either caller or callee uses struct 2314 // return semantics. 2315 if (isCalleeStructRet || isCallerStructRet) 2316 return false; 2317 2318 // Externally-defined functions with weak linkage should not be 2319 // tail-called on ARM when the OS does not support dynamic 2320 // pre-emption of symbols, as the AAELF spec requires normal calls 2321 // to undefined weak functions to be replaced with a NOP or jump to the 2322 // next instruction. The behaviour of branch instructions in this 2323 // situation (as used for tail calls) is implementation-defined, so we 2324 // cannot rely on the linker replacing the tail call with a return. 2325 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2326 const GlobalValue *GV = G->getGlobal(); 2327 const Triple &TT = getTargetMachine().getTargetTriple(); 2328 if (GV->hasExternalWeakLinkage() && 2329 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO())) 2330 return false; 2331 } 2332 2333 // Check that the call results are passed in the same way. 2334 LLVMContext &C = *DAG.getContext(); 2335 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, 2336 CCAssignFnForNode(CalleeCC, true, isVarArg), 2337 CCAssignFnForNode(CallerCC, true, isVarArg))) 2338 return false; 2339 // The callee has to preserve all registers the caller needs to preserve. 2340 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2341 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2342 if (CalleeCC != CallerCC) { 2343 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2344 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2345 return false; 2346 } 2347 2348 // If Caller's vararg or byval argument has been split between registers and 2349 // stack, do not perform tail call, since part of the argument is in caller's 2350 // local frame. 2351 const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>(); 2352 if (AFI_Caller->getArgRegsSaveSize()) 2353 return false; 2354 2355 // If the callee takes no arguments then go on to check the results of the 2356 // call. 2357 if (!Outs.empty()) { 2358 // Check if stack adjustment is needed. For now, do not do this if any 2359 // argument is passed on the stack. 2360 SmallVector<CCValAssign, 16> ArgLocs; 2361 ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call); 2362 CCInfo.AnalyzeCallOperands(Outs, 2363 CCAssignFnForNode(CalleeCC, false, isVarArg)); 2364 if (CCInfo.getNextStackOffset()) { 2365 // Check if the arguments are already laid out in the right way as 2366 // the caller's fixed stack objects. 2367 MachineFrameInfo &MFI = MF.getFrameInfo(); 2368 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2369 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 2370 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 2371 i != e; 2372 ++i, ++realArgIdx) { 2373 CCValAssign &VA = ArgLocs[i]; 2374 EVT RegVT = VA.getLocVT(); 2375 SDValue Arg = OutVals[realArgIdx]; 2376 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2377 if (VA.getLocInfo() == CCValAssign::Indirect) 2378 return false; 2379 if (VA.needsCustom()) { 2380 // f64 and vector types are split into multiple registers or 2381 // register/stack-slot combinations. The types will not match 2382 // the registers; give up on memory f64 refs until we figure 2383 // out what to do about this. 
2384 if (!VA.isRegLoc()) 2385 return false; 2386 if (!ArgLocs[++i].isRegLoc()) 2387 return false; 2388 if (RegVT == MVT::v2f64) { 2389 if (!ArgLocs[++i].isRegLoc()) 2390 return false; 2391 if (!ArgLocs[++i].isRegLoc()) 2392 return false; 2393 } 2394 } else if (!VA.isRegLoc()) { 2395 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2396 MFI, MRI, TII)) 2397 return false; 2398 } 2399 } 2400 } 2401 2402 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2403 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) 2404 return false; 2405 } 2406 2407 return true; 2408 } 2409 2410 bool 2411 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 2412 MachineFunction &MF, bool isVarArg, 2413 const SmallVectorImpl<ISD::OutputArg> &Outs, 2414 LLVMContext &Context) const { 2415 SmallVector<CCValAssign, 16> RVLocs; 2416 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 2417 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true, 2418 isVarArg)); 2419 } 2420 2421 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, 2422 const SDLoc &DL, SelectionDAG &DAG) { 2423 const MachineFunction &MF = DAG.getMachineFunction(); 2424 const Function *F = MF.getFunction(); 2425 2426 StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString(); 2427 2428 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset 2429 // version of the "preferred return address". These offsets affect the return 2430 // instruction if this is a return from PL1 without hypervisor extensions. 2431 // IRQ/FIQ: +4 "subs pc, lr, #4" 2432 // SWI: 0 "subs pc, lr, #0" 2433 // ABORT: +4 "subs pc, lr, #4" 2434 // UNDEF: +4/+2 "subs pc, lr, #0" 2435 // UNDEF varies depending on where the exception came from ARM or Thumb 2436 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. 2437 2438 int64_t LROffset; 2439 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || 2440 IntKind == "ABORT") 2441 LROffset = 4; 2442 else if (IntKind == "SWI" || IntKind == "UNDEF") 2443 LROffset = 0; 2444 else 2445 report_fatal_error("Unsupported interrupt attribute. If present, value " 2446 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); 2447 2448 RetOps.insert(RetOps.begin() + 1, 2449 DAG.getConstant(LROffset, DL, MVT::i32, false)); 2450 2451 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); 2452 } 2453 2454 SDValue 2455 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2456 bool isVarArg, 2457 const SmallVectorImpl<ISD::OutputArg> &Outs, 2458 const SmallVectorImpl<SDValue> &OutVals, 2459 const SDLoc &dl, SelectionDAG &DAG) const { 2460 2461 // CCValAssign - represent the assignment of the return value to a location. 2462 SmallVector<CCValAssign, 16> RVLocs; 2463 2464 // CCState - Info about the registers and stack slots. 2465 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2466 *DAG.getContext(), Call); 2467 2468 // Analyze outgoing return values. 2469 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 2470 isVarArg)); 2471 2472 SDValue Flag; 2473 SmallVector<SDValue, 4> RetOps; 2474 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2475 bool isLittleEndian = Subtarget->isLittle(); 2476 2477 MachineFunction &MF = DAG.getMachineFunction(); 2478 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2479 AFI->setReturnRegsCount(RVLocs.size()); 2480 2481 // Copy the result values into the output registers. 
2482 for (unsigned i = 0, realRVLocIdx = 0; 2483 i != RVLocs.size(); 2484 ++i, ++realRVLocIdx) { 2485 CCValAssign &VA = RVLocs[i]; 2486 assert(VA.isRegLoc() && "Can only return in registers!"); 2487 2488 SDValue Arg = OutVals[realRVLocIdx]; 2489 2490 switch (VA.getLocInfo()) { 2491 default: llvm_unreachable("Unknown loc info!"); 2492 case CCValAssign::Full: break; 2493 case CCValAssign::BCvt: 2494 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 2495 break; 2496 } 2497 2498 if (VA.needsCustom()) { 2499 if (VA.getLocVT() == MVT::v2f64) { 2500 // Extract the first half and return it in two registers. 2501 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2502 DAG.getConstant(0, dl, MVT::i32)); 2503 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 2504 DAG.getVTList(MVT::i32, MVT::i32), Half); 2505 2506 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2507 HalfGPRs.getValue(isLittleEndian ? 0 : 1), 2508 Flag); 2509 Flag = Chain.getValue(1); 2510 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2511 VA = RVLocs[++i]; // skip ahead to next loc 2512 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2513 HalfGPRs.getValue(isLittleEndian ? 1 : 0), 2514 Flag); 2515 Flag = Chain.getValue(1); 2516 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2517 VA = RVLocs[++i]; // skip ahead to next loc 2518 2519 // Extract the 2nd half and fall through to handle it as an f64 value. 2520 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2521 DAG.getConstant(1, dl, MVT::i32)); 2522 } 2523 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 2524 // available. 2525 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 2526 DAG.getVTList(MVT::i32, MVT::i32), Arg); 2527 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2528 fmrrd.getValue(isLittleEndian ? 0 : 1), 2529 Flag); 2530 Flag = Chain.getValue(1); 2531 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2532 VA = RVLocs[++i]; // skip ahead to next loc 2533 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2534 fmrrd.getValue(isLittleEndian ? 1 : 0), 2535 Flag); 2536 } else 2537 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 2538 2539 // Guarantee that all emitted copies are 2540 // stuck together, avoiding something bad. 2541 Flag = Chain.getValue(1); 2542 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2543 } 2544 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2545 const MCPhysReg *I = 2546 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2547 if (I) { 2548 for (; *I; ++I) { 2549 if (ARM::GPRRegClass.contains(*I)) 2550 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2551 else if (ARM::DPRRegClass.contains(*I)) 2552 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64))); 2553 else 2554 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2555 } 2556 } 2557 2558 // Update chain and glue. 2559 RetOps[0] = Chain; 2560 if (Flag.getNode()) 2561 RetOps.push_back(Flag); 2562 2563 // CPUs which aren't M-class use a special sequence to return from 2564 // exceptions (roughly, any instruction setting pc and cpsr simultaneously, 2565 // though we use "subs pc, lr, #N"). 2566 // 2567 // M-class CPUs actually use a normal return sequence with a special 2568 // (hardware-provided) value in LR, so the normal code path works. 
2569 if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") && 2570 !Subtarget->isMClass()) { 2571 if (Subtarget->isThumb1Only()) 2572 report_fatal_error("interrupt attribute is not supported in Thumb1"); 2573 return LowerInterruptReturn(RetOps, dl, DAG); 2574 } 2575 2576 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps); 2577 } 2578 2579 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 2580 if (N->getNumValues() != 1) 2581 return false; 2582 if (!N->hasNUsesOfValue(1, 0)) 2583 return false; 2584 2585 SDValue TCChain = Chain; 2586 SDNode *Copy = *N->use_begin(); 2587 if (Copy->getOpcode() == ISD::CopyToReg) { 2588 // If the copy has a glue operand, we conservatively assume it isn't safe to 2589 // perform a tail call. 2590 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2591 return false; 2592 TCChain = Copy->getOperand(0); 2593 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 2594 SDNode *VMov = Copy; 2595 // f64 returned in a pair of GPRs. 2596 SmallPtrSet<SDNode*, 2> Copies; 2597 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2598 UI != UE; ++UI) { 2599 if (UI->getOpcode() != ISD::CopyToReg) 2600 return false; 2601 Copies.insert(*UI); 2602 } 2603 if (Copies.size() > 2) 2604 return false; 2605 2606 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2607 UI != UE; ++UI) { 2608 SDValue UseChain = UI->getOperand(0); 2609 if (Copies.count(UseChain.getNode())) 2610 // Second CopyToReg 2611 Copy = *UI; 2612 else { 2613 // We are at the top of this chain. 2614 // If the copy has a glue operand, we conservatively assume it 2615 // isn't safe to perform a tail call. 2616 if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue) 2617 return false; 2618 // First CopyToReg 2619 TCChain = UseChain; 2620 } 2621 } 2622 } else if (Copy->getOpcode() == ISD::BITCAST) { 2623 // f32 returned in a single GPR. 2624 if (!Copy->hasOneUse()) 2625 return false; 2626 Copy = *Copy->use_begin(); 2627 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2628 return false; 2629 // If the copy has a glue operand, we conservatively assume it isn't safe to 2630 // perform a tail call. 2631 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2632 return false; 2633 TCChain = Copy->getOperand(0); 2634 } else { 2635 return false; 2636 } 2637 2638 bool HasRet = false; 2639 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2640 UI != UE; ++UI) { 2641 if (UI->getOpcode() != ARMISD::RET_FLAG && 2642 UI->getOpcode() != ARMISD::INTRET_FLAG) 2643 return false; 2644 HasRet = true; 2645 } 2646 2647 if (!HasRet) 2648 return false; 2649 2650 Chain = TCChain; 2651 return true; 2652 } 2653 2654 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2655 if (!Subtarget->supportsTailCall()) 2656 return false; 2657 2658 auto Attr = 2659 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); 2660 if (!CI->isTailCall() || Attr.getValueAsString() == "true") 2661 return false; 2662 2663 return true; 2664 } 2665 2666 // Trying to write a 64 bit value so need to split into two 32 bit values first, 2667 // and pass the lower and high parts through. 2668 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { 2669 SDLoc DL(Op); 2670 SDValue WriteValue = Op->getOperand(2); 2671 2672 // This function is only supposed to be called for i64 type argument. 
2673 assert(WriteValue.getValueType() == MVT::i64 2674 && "LowerWRITE_REGISTER called for non-i64 type argument."); 2675 2676 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 2677 DAG.getConstant(0, DL, MVT::i32)); 2678 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue, 2679 DAG.getConstant(1, DL, MVT::i32)); 2680 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi }; 2681 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops); 2682 } 2683 2684 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2685 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 2686 // one of the above mentioned nodes. It has to be wrapped because otherwise 2687 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 2688 // be used to form addressing mode. These wrapped nodes will be selected 2689 // into MOVi. 2690 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 2691 EVT PtrVT = Op.getValueType(); 2692 // FIXME there is no actual debug info here 2693 SDLoc dl(Op); 2694 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2695 SDValue Res; 2696 if (CP->isMachineConstantPoolEntry()) 2697 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2698 CP->getAlignment()); 2699 else 2700 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2701 CP->getAlignment()); 2702 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 2703 } 2704 2705 unsigned ARMTargetLowering::getJumpTableEncoding() const { 2706 return MachineJumpTableInfo::EK_Inline; 2707 } 2708 2709 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 2710 SelectionDAG &DAG) const { 2711 MachineFunction &MF = DAG.getMachineFunction(); 2712 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2713 unsigned ARMPCLabelIndex = 0; 2714 SDLoc DL(Op); 2715 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2716 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 2717 SDValue CPAddr; 2718 bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI(); 2719 if (!IsPositionIndependent) { 2720 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 2721 } else { 2722 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2723 ARMPCLabelIndex = AFI->createPICLabelUId(); 2724 ARMConstantPoolValue *CPV = 2725 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 2726 ARMCP::CPBlockAddress, PCAdj); 2727 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2728 } 2729 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 2730 SDValue Result = DAG.getLoad( 2731 PtrVT, DL, DAG.getEntryNode(), CPAddr, 2732 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2733 if (!IsPositionIndependent) 2734 return Result; 2735 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32); 2736 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 2737 } 2738 2739 /// \brief Convert a TLS address reference into the correct sequence of loads 2740 /// and calls to compute the variable's address for Darwin, and return an 2741 /// SDValue containing the final node. 2742 2743 /// Darwin only has one TLS scheme which must be capable of dealing with the 2744 /// fully general situation, in the worst case. This means: 2745 /// + "extern __thread" declaration. 2746 /// + Defined in a possibly unknown dynamic library. 
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
  SDLoc DL(Op);

  // First step is to get the address of the actual global symbol. This is
  // where the TLS descriptor lives.
  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      MVT::i32, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()),
      /* Alignment = */ 4,
      MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant);
  Chain = FuncTLVGet.getValue(1);

  MachineFunction &F = DAG.getMachineFunction();
  MachineFrameInfo &MFI = F.getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
  // silly).
  auto TRI =
      getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
  auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
  const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());

  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
2799 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue()); 2800 Chain = 2801 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue), 2802 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32), 2803 DAG.getRegisterMask(Mask), Chain.getValue(1)); 2804 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1)); 2805 } 2806 2807 SDValue 2808 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op, 2809 SelectionDAG &DAG) const { 2810 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering"); 2811 2812 SDValue Chain = DAG.getEntryNode(); 2813 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2814 SDLoc DL(Op); 2815 2816 // Load the current TEB (thread environment block) 2817 SDValue Ops[] = {Chain, 2818 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), 2819 DAG.getConstant(15, DL, MVT::i32), 2820 DAG.getConstant(0, DL, MVT::i32), 2821 DAG.getConstant(13, DL, MVT::i32), 2822 DAG.getConstant(0, DL, MVT::i32), 2823 DAG.getConstant(2, DL, MVT::i32)}; 2824 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 2825 DAG.getVTList(MVT::i32, MVT::Other), Ops); 2826 2827 SDValue TEB = CurrentTEB.getValue(0); 2828 Chain = CurrentTEB.getValue(1); 2829 2830 // Load the ThreadLocalStoragePointer from the TEB 2831 // A pointer to the TLS array is located at offset 0x2c from the TEB. 2832 SDValue TLSArray = 2833 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL)); 2834 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo()); 2835 2836 // The pointer to the thread's TLS data area is at the TLS Index scaled by 4 2837 // offset into the TLSArray. 2838 2839 // Load the TLS index from the C runtime 2840 SDValue TLSIndex = 2841 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG); 2842 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex); 2843 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo()); 2844 2845 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex, 2846 DAG.getConstant(2, DL, MVT::i32)); 2847 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain, 2848 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot), 2849 MachinePointerInfo()); 2850 2851 // Get the offset of the start of the .tls section (section base) 2852 const auto *GA = cast<GlobalAddressSDNode>(Op); 2853 auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL); 2854 SDValue Offset = DAG.getLoad( 2855 PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32, 2856 DAG.getTargetConstantPool(CPV, PtrVT, 4)), 2857 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2858 2859 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset); 2860 } 2861 2862 // Lower ISD::GlobalTLSAddress using the "general dynamic" model 2863 SDValue 2864 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 2865 SelectionDAG &DAG) const { 2866 SDLoc dl(GA); 2867 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2868 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2869 MachineFunction &MF = DAG.getMachineFunction(); 2870 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2871 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2872 ARMConstantPoolValue *CPV = 2873 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2874 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 2875 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2876 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2877 Argument = DAG.getLoad( 2878 PtrVT, dl, DAG.getEntryNode(), Argument, 2879 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2880 SDValue Chain = Argument.getValue(1); 2881 2882 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2883 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2884 2885 // call __tls_get_addr. 2886 ArgListTy Args; 2887 ArgListEntry Entry; 2888 Entry.Node = Argument; 2889 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2890 Args.push_back(Entry); 2891 2892 // FIXME: is there useful debug info available here? 2893 TargetLowering::CallLoweringInfo CLI(DAG); 2894 CLI.setDebugLoc(dl).setChain(Chain) 2895 .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()), 2896 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args)); 2897 2898 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2899 return CallResult.first; 2900 } 2901 2902 // Lower ISD::GlobalTLSAddress using the "initial exec" or 2903 // "local exec" model. 2904 SDValue 2905 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2906 SelectionDAG &DAG, 2907 TLSModel::Model model) const { 2908 const GlobalValue *GV = GA->getGlobal(); 2909 SDLoc dl(GA); 2910 SDValue Offset; 2911 SDValue Chain = DAG.getEntryNode(); 2912 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2913 // Get the Thread Pointer 2914 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2915 2916 if (model == TLSModel::InitialExec) { 2917 MachineFunction &MF = DAG.getMachineFunction(); 2918 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2919 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2920 // Initial exec model. 2921 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2922 ARMConstantPoolValue *CPV = 2923 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2924 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2925 true); 2926 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2927 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2928 Offset = DAG.getLoad( 2929 PtrVT, dl, Chain, Offset, 2930 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2931 Chain = Offset.getValue(1); 2932 2933 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 2934 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2935 2936 Offset = DAG.getLoad( 2937 PtrVT, dl, Chain, Offset, 2938 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2939 } else { 2940 // local exec model 2941 assert(model == TLSModel::LocalExec); 2942 ARMConstantPoolValue *CPV = 2943 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2944 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2945 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2946 Offset = DAG.getLoad( 2947 PtrVT, dl, Chain, Offset, 2948 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 2949 } 2950 2951 // The address of the thread local variable is the add of the thread 2952 // pointer with the offset of the variable. 
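  // For example (illustrative; exact registers depend on allocation):
  //   initial exec:  ldr rT, .LCPIn      @ x(gottpoff) constant-pool entry
  //                  add rT, pc          @ ARMISD::PIC_ADD
  //                  ldr rT, [rT]        @ load the TP offset out of the GOT
  //   local exec:    ldr rT, .LCPIn      @ x(tpoff) constant-pool entry
  //   In both cases the variable's address is thread-pointer + rT, where the
  //   thread pointer comes from ARMISD::THREAD_POINTER (typically an mrc of
  //   TPIDRURO, or a call to __aeabi_read_tp on targets without it).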
2953 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2954 } 2955 2956 SDValue 2957 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2958 if (Subtarget->isTargetDarwin()) 2959 return LowerGlobalTLSAddressDarwin(Op, DAG); 2960 2961 if (Subtarget->isTargetWindows()) 2962 return LowerGlobalTLSAddressWindows(Op, DAG); 2963 2964 // TODO: implement the "local dynamic" model 2965 assert(Subtarget->isTargetELF() && "Only ELF implemented here"); 2966 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2967 if (DAG.getTarget().Options.EmulatedTLS) 2968 return LowerToTLSEmulatedModel(GA, DAG); 2969 2970 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 2971 2972 switch (model) { 2973 case TLSModel::GeneralDynamic: 2974 case TLSModel::LocalDynamic: 2975 return LowerToTLSGeneralDynamicModel(GA, DAG); 2976 case TLSModel::InitialExec: 2977 case TLSModel::LocalExec: 2978 return LowerToTLSExecModels(GA, DAG, model); 2979 } 2980 llvm_unreachable("bogus TLS model"); 2981 } 2982 2983 /// Return true if all users of V are within function F, looking through 2984 /// ConstantExprs. 2985 static bool allUsersAreInFunction(const Value *V, const Function *F) { 2986 SmallVector<const User*,4> Worklist; 2987 for (auto *U : V->users()) 2988 Worklist.push_back(U); 2989 while (!Worklist.empty()) { 2990 auto *U = Worklist.pop_back_val(); 2991 if (isa<ConstantExpr>(U)) { 2992 for (auto *UU : U->users()) 2993 Worklist.push_back(UU); 2994 continue; 2995 } 2996 2997 auto *I = dyn_cast<Instruction>(U); 2998 if (!I || I->getParent()->getParent() != F) 2999 return false; 3000 } 3001 return true; 3002 } 3003 3004 /// Return true if all users of V are within some (any) function, looking through 3005 /// ConstantExprs. In other words, are there any global constant users? 3006 static bool allUsersAreInFunctions(const Value *V) { 3007 SmallVector<const User*,4> Worklist; 3008 for (auto *U : V->users()) 3009 Worklist.push_back(U); 3010 while (!Worklist.empty()) { 3011 auto *U = Worklist.pop_back_val(); 3012 if (isa<ConstantExpr>(U)) { 3013 for (auto *UU : U->users()) 3014 Worklist.push_back(UU); 3015 continue; 3016 } 3017 3018 if (!isa<Instruction>(U)) 3019 return false; 3020 } 3021 return true; 3022 } 3023 3024 // Return true if T is an integer, float or an array/vector of either. 3025 static bool isSimpleType(Type *T) { 3026 if (T->isIntegerTy() || T->isFloatingPointTy()) 3027 return true; 3028 Type *SubT = nullptr; 3029 if (T->isArrayTy()) 3030 SubT = T->getArrayElementType(); 3031 else if (T->isVectorTy()) 3032 SubT = T->getVectorElementType(); 3033 else 3034 return false; 3035 return SubT->isIntegerTy() || SubT->isFloatingPointTy(); 3036 } 3037 3038 static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG, 3039 EVT PtrVT, SDLoc dl) { 3040 // If we're creating a pool entry for a constant global with unnamed address, 3041 // and the global is small enough, we can emit it inline into the constant pool 3042 // to save ourselves an indirection. 3043 // 3044 // This is a win if the constant is only used in one function (so it doesn't 3045 // need to be duplicated) or duplicating the constant wouldn't increase code 3046 // size (implying the constant is no larger than 4 bytes). 3047 const Function *F = DAG.getMachineFunction().getFunction(); 3048 3049 // We rely on this decision to inline being idemopotent and unrelated to the 3050 // use-site. 
We know that if we inline a variable at one use site, we'll
3051 // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3052 // doesn't know about this optimization, so bail out if it's enabled;
3053 // otherwise we could decide to inline here (and thus never emit the GV)
3054 // while fast-isel generated code still requires the GV.
3055   if (!EnableConstpoolPromotion ||
3056       DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3057     return SDValue();
3058 
3059   auto *GVar = dyn_cast<GlobalVariable>(GV);
3060   if (!GVar || !GVar->hasInitializer() ||
3061       !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3062       !GVar->hasLocalLinkage())
3063     return SDValue();
3064 
3065   // Ensure that we don't try and inline any type that contains pointers. If
3066   // we inline a value that contains relocations, we move the relocations from
3067   // .data to .text which is not ideal.
3068   auto *Init = GVar->getInitializer();
3069   if (!isSimpleType(Init->getType()))
3070     return SDValue();
3071 
3072   // The constant islands pass can only really deal with alignment requests
3073   // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3074   // any type wanting greater alignment requirements than 4 bytes. We also
3075   // can only promote constants that are multiples of 4 bytes in size or
3076   // are paddable to a multiple of 4. Currently we only try and pad constants
3077   // that are strings for simplicity.
3078   auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3079   unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3080   unsigned Align = DAG.getDataLayout().getABITypeAlignment(Init->getType());
3081   unsigned RequiredPadding = 4 - (Size % 4);
3082   bool PaddingPossible =
3083     RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3084   if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize)
3085     return SDValue();
3086 
3087   unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3088   MachineFunction &MF = DAG.getMachineFunction();
3089   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3090 
3091   // We can't bloat the constant pool too much, else the ConstantIslands pass
3092   // may fail to converge. If we haven't promoted this global yet (it may have
3093   // multiple uses), and promoting it would increase the constant pool size (Sz
3094   // > 4), ensure we have space to do so up to MaxTotal.
3095   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3096     if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3097         ConstpoolPromotionMaxTotal)
3098       return SDValue();
3099 
3100   // This is only valid if all users are in a single function OR it has users
3101   // in multiple functions but it is no larger than a pointer. We also check if
3102   // GVar has constant (non-ConstantExpr) users. If so, it essentially has its
3103   // address taken.
3104   if (!allUsersAreInFunction(GVar, F) &&
3105       !(Size <= 4 && allUsersAreInFunctions(GVar)))
3106     return SDValue();
3107 
3108   // We're going to inline this global. Pad it out if needed.
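  // For example (illustrative): a 6-byte string initializer has
  // RequiredPadding == 2 (4 - 6 % 4) and is widened below to 8 bytes by
  // appending two zero bytes before being placed in the constant pool.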
3109 if (RequiredPadding != 4) { 3110 StringRef S = CDAInit->getAsString(); 3111 3112 SmallVector<uint8_t,16> V(S.size()); 3113 std::copy(S.bytes_begin(), S.bytes_end(), V.begin()); 3114 while (RequiredPadding--) 3115 V.push_back(0); 3116 Init = ConstantDataArray::get(*DAG.getContext(), V); 3117 } 3118 3119 auto CPVal = ARMConstantPoolConstant::Create(GVar, Init); 3120 SDValue CPAddr = 3121 DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4); 3122 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) { 3123 AFI->markGlobalAsPromotedToConstantPool(GVar); 3124 AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() + 3125 PaddedSize - 4); 3126 } 3127 ++NumConstpoolPromoted; 3128 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3129 } 3130 3131 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 3132 SelectionDAG &DAG) const { 3133 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3134 SDLoc dl(Op); 3135 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3136 const TargetMachine &TM = getTargetMachine(); 3137 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 3138 GV = GA->getBaseObject(); 3139 bool IsRO = 3140 (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) || 3141 isa<Function>(GV); 3142 3143 if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) 3144 if (SDValue V = promoteToConstantPool(GV, DAG, PtrVT, dl)) 3145 return V; 3146 3147 if (isPositionIndependent()) { 3148 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV); 3149 3150 MachineFunction &MF = DAG.getMachineFunction(); 3151 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3152 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 3153 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3154 SDLoc dl(Op); 3155 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 3156 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create( 3157 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, 3158 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier, 3159 /*AddCurrentAddress=*/UseGOT_PREL); 3160 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 3161 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3162 SDValue Result = DAG.getLoad( 3163 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3164 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3165 SDValue Chain = Result.getValue(1); 3166 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 3167 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 3168 if (UseGOT_PREL) 3169 Result = 3170 DAG.getLoad(PtrVT, dl, Chain, Result, 3171 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 3172 return Result; 3173 } else if (Subtarget->isROPI() && IsRO) { 3174 // PC-relative. 3175 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT); 3176 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G); 3177 return Result; 3178 } else if (Subtarget->isRWPI() && !IsRO) { 3179 // SB-relative. 
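    // Illustrative RWPI sequence, with R9 as the static base (SB):
    //   ldr rT, .LCPIn          @ constant-pool entry holding GV(sbrel)
    //   add result, r9, rT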
3180 ARMConstantPoolValue *CPV = 3181 ARMConstantPoolConstant::Create(GV, ARMCP::SBREL); 3182 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 3183 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3184 SDValue G = DAG.getLoad( 3185 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3186 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3187 SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT); 3188 SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, G); 3189 return Result; 3190 } 3191 3192 // If we have T2 ops, we can materialize the address directly via movt/movw 3193 // pair. This is always cheaper. 3194 if (Subtarget->useMovt(DAG.getMachineFunction())) { 3195 ++NumMovwMovt; 3196 // FIXME: Once remat is capable of dealing with instructions with register 3197 // operands, expand this into two nodes. 3198 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 3199 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 3200 } else { 3201 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 3202 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3203 return DAG.getLoad( 3204 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3205 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3206 } 3207 } 3208 3209 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 3210 SelectionDAG &DAG) const { 3211 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && 3212 "ROPI/RWPI not currently supported for Darwin"); 3213 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3214 SDLoc dl(Op); 3215 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3216 3217 if (Subtarget->useMovt(DAG.getMachineFunction())) 3218 ++NumMovwMovt; 3219 3220 // FIXME: Once remat is capable of dealing with instructions with register 3221 // operands, expand this into multiple nodes 3222 unsigned Wrapper = 3223 isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper; 3224 3225 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); 3226 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); 3227 3228 if (Subtarget->isGVIndirectSymbol(GV)) 3229 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 3230 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 3231 return Result; 3232 } 3233 3234 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, 3235 SelectionDAG &DAG) const { 3236 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); 3237 assert(Subtarget->useMovt(DAG.getMachineFunction()) && 3238 "Windows on ARM expects to use movw/movt"); 3239 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && 3240 "ROPI/RWPI not currently supported for Windows"); 3241 3242 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 3243 const ARMII::TOF TargetFlags = 3244 (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG); 3245 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3246 SDValue Result; 3247 SDLoc DL(Op); 3248 3249 ++NumMovwMovt; 3250 3251 // FIXME: Once remat is capable of dealing with instructions with register 3252 // operands, expand this into two nodes. 
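  // Illustrative result (Windows on ARM always uses movw/movt):
  //   movw r0, :lower16:foo         @ __imp_foo when GV is dllimport
  //   movt r0, :upper16:foo
  //   ldr  r0, [r0]                 @ dllimport only: load through the IAT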
3253 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, 3254 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0, 3255 TargetFlags)); 3256 if (GV->hasDLLImportStorageClass()) 3257 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 3258 MachinePointerInfo::getGOT(DAG.getMachineFunction())); 3259 return Result; 3260 } 3261 3262 SDValue 3263 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 3264 SDLoc dl(Op); 3265 SDValue Val = DAG.getConstant(0, dl, MVT::i32); 3266 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 3267 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 3268 Op.getOperand(1), Val); 3269 } 3270 3271 SDValue 3272 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 3273 SDLoc dl(Op); 3274 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 3275 Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32)); 3276 } 3277 3278 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, 3279 SelectionDAG &DAG) const { 3280 SDLoc dl(Op); 3281 return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other, 3282 Op.getOperand(0)); 3283 } 3284 3285 SDValue 3286 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 3287 const ARMSubtarget *Subtarget) const { 3288 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3289 SDLoc dl(Op); 3290 switch (IntNo) { 3291 default: return SDValue(); // Don't custom lower most intrinsics. 3292 case Intrinsic::arm_rbit: { 3293 assert(Op.getOperand(1).getValueType() == MVT::i32 && 3294 "RBIT intrinsic must have i32 type!"); 3295 return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1)); 3296 } 3297 case Intrinsic::thread_pointer: { 3298 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3299 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 3300 } 3301 case Intrinsic::eh_sjlj_lsda: { 3302 MachineFunction &MF = DAG.getMachineFunction(); 3303 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3304 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 3305 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3306 SDValue CPAddr; 3307 bool IsPositionIndependent = isPositionIndependent(); 3308 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0; 3309 ARMConstantPoolValue *CPV = 3310 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 3311 ARMCP::CPLSDA, PCAdj); 3312 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 3313 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 3314 SDValue Result = DAG.getLoad( 3315 PtrVT, dl, DAG.getEntryNode(), CPAddr, 3316 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 3317 3318 if (IsPositionIndependent) { 3319 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32); 3320 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 3321 } 3322 return Result; 3323 } 3324 case Intrinsic::arm_neon_vmulls: 3325 case Intrinsic::arm_neon_vmullu: { 3326 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 3327 ? ARMISD::VMULLs : ARMISD::VMULLu; 3328 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3329 Op.getOperand(1), Op.getOperand(2)); 3330 } 3331 case Intrinsic::arm_neon_vminnm: 3332 case Intrinsic::arm_neon_vmaxnm: { 3333 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm) 3334 ? 
ISD::FMINNUM : ISD::FMAXNUM; 3335 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3336 Op.getOperand(1), Op.getOperand(2)); 3337 } 3338 case Intrinsic::arm_neon_vminu: 3339 case Intrinsic::arm_neon_vmaxu: { 3340 if (Op.getValueType().isFloatingPoint()) 3341 return SDValue(); 3342 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu) 3343 ? ISD::UMIN : ISD::UMAX; 3344 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3345 Op.getOperand(1), Op.getOperand(2)); 3346 } 3347 case Intrinsic::arm_neon_vmins: 3348 case Intrinsic::arm_neon_vmaxs: { 3349 // v{min,max}s is overloaded between signed integers and floats. 3350 if (!Op.getValueType().isFloatingPoint()) { 3351 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) 3352 ? ISD::SMIN : ISD::SMAX; 3353 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3354 Op.getOperand(1), Op.getOperand(2)); 3355 } 3356 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins) 3357 ? ISD::FMINNAN : ISD::FMAXNAN; 3358 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(), 3359 Op.getOperand(1), Op.getOperand(2)); 3360 } 3361 } 3362 } 3363 3364 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 3365 const ARMSubtarget *Subtarget) { 3366 // FIXME: handle "fence singlethread" more efficiently. 3367 SDLoc dl(Op); 3368 if (!Subtarget->hasDataBarrier()) { 3369 // Some ARMv6 cpus can support data barriers with an mcr instruction. 3370 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 3371 // here. 3372 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 3373 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!"); 3374 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 3375 DAG.getConstant(0, dl, MVT::i32)); 3376 } 3377 3378 ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1)); 3379 AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue()); 3380 ARM_MB::MemBOpt Domain = ARM_MB::ISH; 3381 if (Subtarget->isMClass()) { 3382 // Only a full system barrier exists in the M-class architectures. 3383 Domain = ARM_MB::SY; 3384 } else if (Subtarget->preferISHSTBarriers() && 3385 Ord == AtomicOrdering::Release) { 3386 // Swift happens to implement ISHST barriers in a way that's compatible with 3387 // Release semantics but weaker than ISH so we'd be fools not to use 3388 // it. Beware: other processors probably don't! 3389 Domain = ARM_MB::ISHST; 3390 } 3391 3392 return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0), 3393 DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32), 3394 DAG.getConstant(Domain, dl, MVT::i32)); 3395 } 3396 3397 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 3398 const ARMSubtarget *Subtarget) { 3399 // ARM pre v5TE and Thumb1 does not have preload instructions. 3400 if (!(Subtarget->isThumb2() || 3401 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 3402 // Just preserve the chain. 3403 return Op.getOperand(0); 3404 3405 SDLoc dl(Op); 3406 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 3407 if (!isRead && 3408 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 3409 // ARMv7 with MP extension has PLDW. 3410 return Op.getOperand(0); 3411 3412 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 3413 if (Subtarget->isThumb()) { 3414 // Invert the bits. 
3415 isRead = ~isRead & 1; 3416 isData = ~isData & 1; 3417 } 3418 3419 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 3420 Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32), 3421 DAG.getConstant(isData, dl, MVT::i32)); 3422 } 3423 3424 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 3425 MachineFunction &MF = DAG.getMachineFunction(); 3426 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 3427 3428 // vastart just stores the address of the VarArgsFrameIndex slot into the 3429 // memory location argument. 3430 SDLoc dl(Op); 3431 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 3432 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3433 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3434 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3435 MachinePointerInfo(SV)); 3436 } 3437 3438 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, 3439 CCValAssign &NextVA, 3440 SDValue &Root, 3441 SelectionDAG &DAG, 3442 const SDLoc &dl) const { 3443 MachineFunction &MF = DAG.getMachineFunction(); 3444 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3445 3446 const TargetRegisterClass *RC; 3447 if (AFI->isThumb1OnlyFunction()) 3448 RC = &ARM::tGPRRegClass; 3449 else 3450 RC = &ARM::GPRRegClass; 3451 3452 // Transform the arguments stored in physical registers into virtual ones. 3453 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3454 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 3455 3456 SDValue ArgValue2; 3457 if (NextVA.isMemLoc()) { 3458 MachineFrameInfo &MFI = MF.getFrameInfo(); 3459 int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true); 3460 3461 // Create load node to retrieve arguments from the stack. 3462 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 3463 ArgValue2 = DAG.getLoad( 3464 MVT::i32, dl, Root, FIN, 3465 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); 3466 } else { 3467 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 3468 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 3469 } 3470 if (!Subtarget->isLittle()) 3471 std::swap (ArgValue, ArgValue2); 3472 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 3473 } 3474 3475 // The remaining GPRs hold either the beginning of variable-argument 3476 // data, or the beginning of an aggregate passed by value (usually 3477 // byval). Either way, we allocate stack slots adjacent to the data 3478 // provided by our caller, and store the unallocated registers there. 3479 // If this is a variadic function, the va_list pointer will begin with 3480 // these values; otherwise, this reassembles a (byval) structure that 3481 // was split between registers and memory. 3482 // Return: The frame index registers were stored into. 3483 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, 3484 const SDLoc &dl, SDValue &Chain, 3485 const Value *OrigArg, 3486 unsigned InRegsParamRecordIdx, 3487 int ArgOffset, unsigned ArgSize) const { 3488 // Currently, two use-cases possible: 3489 // Case #1. Non-var-args function, and we meet first byval parameter. 3490 // Setup first unallocated register as first byval register; 3491 // eat all remained registers 3492 // (these two actions are performed by HandleByVal method). 3493 // Then, here, we initialize stack frame with 3494 // "store-reg" instructions. 3495 // Case #2. Var-args function, that doesn't contain byval parameters. 
3496 // The same: eat all remained unallocated registers, 3497 // initialize stack frame. 3498 3499 MachineFunction &MF = DAG.getMachineFunction(); 3500 MachineFrameInfo &MFI = MF.getFrameInfo(); 3501 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3502 unsigned RBegin, REnd; 3503 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { 3504 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); 3505 } else { 3506 unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs); 3507 RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx]; 3508 REnd = ARM::R4; 3509 } 3510 3511 if (REnd != RBegin) 3512 ArgOffset = -4 * (ARM::R4 - RBegin); 3513 3514 auto PtrVT = getPointerTy(DAG.getDataLayout()); 3515 int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false); 3516 SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT); 3517 3518 SmallVector<SDValue, 4> MemOps; 3519 const TargetRegisterClass *RC = 3520 AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 3521 3522 for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) { 3523 unsigned VReg = MF.addLiveIn(Reg, RC); 3524 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3525 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3526 MachinePointerInfo(OrigArg, 4 * i)); 3527 MemOps.push_back(Store); 3528 FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT)); 3529 } 3530 3531 if (!MemOps.empty()) 3532 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3533 return FrameIndex; 3534 } 3535 3536 // Setup stack frame, the va_list pointer will start from. 3537 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, 3538 const SDLoc &dl, SDValue &Chain, 3539 unsigned ArgOffset, 3540 unsigned TotalArgRegsSaveSize, 3541 bool ForceMutable) const { 3542 MachineFunction &MF = DAG.getMachineFunction(); 3543 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3544 3545 // Try to store any remaining integer argument regs 3546 // to their spots on the stack so that they may be loaded by dereferencing 3547 // the result of va_next. 3548 // If there is no regs to be stored, just point address after last 3549 // argument passed via stack. 3550 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr, 3551 CCInfo.getInRegsParamsCount(), 3552 CCInfo.getNextStackOffset(), 4); 3553 AFI->setVarArgsFrameIndex(FrameIndex); 3554 } 3555 3556 SDValue ARMTargetLowering::LowerFormalArguments( 3557 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3558 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3559 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3560 MachineFunction &MF = DAG.getMachineFunction(); 3561 MachineFrameInfo &MFI = MF.getFrameInfo(); 3562 3563 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 3564 3565 // Assign locations to all of the incoming arguments. 3566 SmallVector<CCValAssign, 16> ArgLocs; 3567 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3568 *DAG.getContext(), Prologue); 3569 CCInfo.AnalyzeFormalArguments(Ins, 3570 CCAssignFnForNode(CallConv, /* Return*/ false, 3571 isVarArg)); 3572 3573 SmallVector<SDValue, 16> ArgValues; 3574 SDValue ArgValue; 3575 Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin(); 3576 unsigned CurArgIdx = 0; 3577 3578 // Initially ArgRegsSaveSize is zero. 3579 // Then we increase this value each time we meet byval parameter. 3580 // We also increase this value in case of varargs function. 
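  // For example (illustrative, AAPCS): for 'int f(int a, ...)' only r0 is
  // taken by the named argument, so r1-r3 get spilled below the CFA and
  // ArgRegsSaveSize ends up as 12.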
3581 AFI->setArgRegsSaveSize(0); 3582 3583 // Calculate the amount of stack space that we need to allocate to store 3584 // byval and variadic arguments that are passed in registers. 3585 // We need to know this before we allocate the first byval or variadic 3586 // argument, as they will be allocated a stack slot below the CFA (Canonical 3587 // Frame Address, the stack pointer at entry to the function). 3588 unsigned ArgRegBegin = ARM::R4; 3589 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3590 if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount()) 3591 break; 3592 3593 CCValAssign &VA = ArgLocs[i]; 3594 unsigned Index = VA.getValNo(); 3595 ISD::ArgFlagsTy Flags = Ins[Index].Flags; 3596 if (!Flags.isByVal()) 3597 continue; 3598 3599 assert(VA.isMemLoc() && "unexpected byval pointer in reg"); 3600 unsigned RBegin, REnd; 3601 CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd); 3602 ArgRegBegin = std::min(ArgRegBegin, RBegin); 3603 3604 CCInfo.nextInRegsParam(); 3605 } 3606 CCInfo.rewindByValRegsInfo(); 3607 3608 int lastInsIndex = -1; 3609 if (isVarArg && MFI.hasVAStart()) { 3610 unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs); 3611 if (RegIdx != array_lengthof(GPRArgRegs)) 3612 ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]); 3613 } 3614 3615 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin); 3616 AFI->setArgRegsSaveSize(TotalArgRegsSaveSize); 3617 auto PtrVT = getPointerTy(DAG.getDataLayout()); 3618 3619 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3620 CCValAssign &VA = ArgLocs[i]; 3621 if (Ins[VA.getValNo()].isOrigArg()) { 3622 std::advance(CurOrigArg, 3623 Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx); 3624 CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex(); 3625 } 3626 // Arguments stored in registers. 3627 if (VA.isRegLoc()) { 3628 EVT RegVT = VA.getLocVT(); 3629 3630 if (VA.needsCustom()) { 3631 // f64 and vector types are split up into multiple registers or 3632 // combinations of registers and stack slots. 3633 if (VA.getLocVT() == MVT::v2f64) { 3634 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 3635 Chain, DAG, dl); 3636 VA = ArgLocs[++i]; // skip ahead to next loc 3637 SDValue ArgValue2; 3638 if (VA.isMemLoc()) { 3639 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true); 3640 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3641 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 3642 MachinePointerInfo::getFixedStack( 3643 DAG.getMachineFunction(), FI)); 3644 } else { 3645 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 3646 Chain, DAG, dl); 3647 } 3648 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 3649 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 3650 ArgValue, ArgValue1, 3651 DAG.getIntPtrConstant(0, dl)); 3652 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 3653 ArgValue, ArgValue2, 3654 DAG.getIntPtrConstant(1, dl)); 3655 } else 3656 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 3657 3658 } else { 3659 const TargetRegisterClass *RC; 3660 3661 if (RegVT == MVT::f32) 3662 RC = &ARM::SPRRegClass; 3663 else if (RegVT == MVT::f64) 3664 RC = &ARM::DPRRegClass; 3665 else if (RegVT == MVT::v2f64) 3666 RC = &ARM::QPRRegClass; 3667 else if (RegVT == MVT::i32) 3668 RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass 3669 : &ARM::GPRRegClass; 3670 else 3671 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 3672 3673 // Transform the arguments in physical registers into virtual ones. 
3674 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3675 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 3676 } 3677 3678 // If this is an 8 or 16-bit value, it is really passed promoted 3679 // to 32 bits. Insert an assert[sz]ext to capture this, then 3680 // truncate to the right size. 3681 switch (VA.getLocInfo()) { 3682 default: llvm_unreachable("Unknown loc info!"); 3683 case CCValAssign::Full: break; 3684 case CCValAssign::BCvt: 3685 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 3686 break; 3687 case CCValAssign::SExt: 3688 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 3689 DAG.getValueType(VA.getValVT())); 3690 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 3691 break; 3692 case CCValAssign::ZExt: 3693 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 3694 DAG.getValueType(VA.getValVT())); 3695 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 3696 break; 3697 } 3698 3699 InVals.push_back(ArgValue); 3700 3701 } else { // VA.isRegLoc() 3702 3703 // sanity check 3704 assert(VA.isMemLoc()); 3705 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 3706 3707 int index = VA.getValNo(); 3708 3709 // Some Ins[] entries become multiple ArgLoc[] entries. 3710 // Process them only once. 3711 if (index != lastInsIndex) 3712 { 3713 ISD::ArgFlagsTy Flags = Ins[index].Flags; 3714 // FIXME: For now, all byval parameter objects are marked mutable. 3715 // This can be changed with more analysis. 3716 // In case of tail call optimization mark all arguments mutable. 3717 // Since they could be overwritten by lowering of arguments in case of 3718 // a tail call. 3719 if (Flags.isByVal()) { 3720 assert(Ins[index].isOrigArg() && 3721 "Byval arguments cannot be implicit"); 3722 unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed(); 3723 3724 int FrameIndex = StoreByValRegs( 3725 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex, 3726 VA.getLocMemOffset(), Flags.getByValSize()); 3727 InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT)); 3728 CCInfo.nextInRegsParam(); 3729 } else { 3730 unsigned FIOffset = VA.getLocMemOffset(); 3731 int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 3732 FIOffset, true); 3733 3734 // Create load nodes to retrieve arguments from the stack. 3735 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3736 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 3737 MachinePointerInfo::getFixedStack( 3738 DAG.getMachineFunction(), FI))); 3739 } 3740 lastInsIndex = index; 3741 } 3742 } 3743 } 3744 3745 // varargs 3746 if (isVarArg && MFI.hasVAStart()) 3747 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 3748 CCInfo.getNextStackOffset(), 3749 TotalArgRegsSaveSize); 3750 3751 AFI->setArgumentStackSize(CCInfo.getNextStackOffset()); 3752 3753 return Chain; 3754 } 3755 3756 /// isFloatingPointZero - Return true if this is +0.0. 3757 static bool isFloatingPointZero(SDValue Op) { 3758 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 3759 return CFP->getValueAPF().isPosZero(); 3760 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 3761 // Maybe this has already been legalized into the constant pool? 
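    // i.e. (illustrative) the operand has the shape
    //   (load (ARMISD::Wrapper (tconstpool:+0.0))),
    // in which case we inspect the constant-pool entry itself.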
3762 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 3763 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 3764 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 3765 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 3766 return CFP->getValueAPF().isPosZero(); 3767 } 3768 } else if (Op->getOpcode() == ISD::BITCAST && 3769 Op->getValueType(0) == MVT::f64) { 3770 // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64) 3771 // created by LowerConstantFP(). 3772 SDValue BitcastOp = Op->getOperand(0); 3773 if (BitcastOp->getOpcode() == ARMISD::VMOVIMM && 3774 isNullConstant(BitcastOp->getOperand(0))) 3775 return true; 3776 } 3777 return false; 3778 } 3779 3780 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for 3781 /// the given operands. 3782 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 3783 SDValue &ARMcc, SelectionDAG &DAG, 3784 const SDLoc &dl) const { 3785 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 3786 unsigned C = RHSC->getZExtValue(); 3787 if (!isLegalICmpImmediate(C)) { 3788 // Constant does not fit, try adjusting it by one? 3789 switch (CC) { 3790 default: break; 3791 case ISD::SETLT: 3792 case ISD::SETGE: 3793 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 3794 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 3795 RHS = DAG.getConstant(C - 1, dl, MVT::i32); 3796 } 3797 break; 3798 case ISD::SETULT: 3799 case ISD::SETUGE: 3800 if (C != 0 && isLegalICmpImmediate(C-1)) { 3801 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 3802 RHS = DAG.getConstant(C - 1, dl, MVT::i32); 3803 } 3804 break; 3805 case ISD::SETLE: 3806 case ISD::SETGT: 3807 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 3808 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 3809 RHS = DAG.getConstant(C + 1, dl, MVT::i32); 3810 } 3811 break; 3812 case ISD::SETULE: 3813 case ISD::SETUGT: 3814 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 3815 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 3816 RHS = DAG.getConstant(C + 1, dl, MVT::i32); 3817 } 3818 break; 3819 } 3820 } 3821 } 3822 3823 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3824 ARMISD::NodeType CompareType; 3825 switch (CondCode) { 3826 default: 3827 CompareType = ARMISD::CMP; 3828 break; 3829 case ARMCC::EQ: 3830 case ARMCC::NE: 3831 // Uses only Z Flag 3832 CompareType = ARMISD::CMPZ; 3833 break; 3834 } 3835 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 3836 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 3837 } 3838 3839 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 3840 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, 3841 SelectionDAG &DAG, const SDLoc &dl) const { 3842 assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64); 3843 SDValue Cmp; 3844 if (!isFloatingPointZero(RHS)) 3845 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 3846 else 3847 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 3848 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 3849 } 3850 3851 /// duplicateCmp - Glue values can have only one use, so this function 3852 /// duplicates a comparison node. 
3853 SDValue 3854 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 3855 unsigned Opc = Cmp.getOpcode(); 3856 SDLoc DL(Cmp); 3857 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 3858 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3859 3860 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 3861 Cmp = Cmp.getOperand(0); 3862 Opc = Cmp.getOpcode(); 3863 if (Opc == ARMISD::CMPFP) 3864 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3865 else { 3866 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 3867 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 3868 } 3869 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 3870 } 3871 3872 std::pair<SDValue, SDValue> 3873 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG, 3874 SDValue &ARMcc) const { 3875 assert(Op.getValueType() == MVT::i32 && "Unsupported value type"); 3876 3877 SDValue Value, OverflowCmp; 3878 SDValue LHS = Op.getOperand(0); 3879 SDValue RHS = Op.getOperand(1); 3880 SDLoc dl(Op); 3881 3882 // FIXME: We are currently always generating CMPs because we don't support 3883 // generating CMN through the backend. This is not as good as the natural 3884 // CMP case because it causes a register dependency and cannot be folded 3885 // later. 3886 3887 switch (Op.getOpcode()) { 3888 default: 3889 llvm_unreachable("Unknown overflow instruction!"); 3890 case ISD::SADDO: 3891 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3892 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3893 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3894 break; 3895 case ISD::UADDO: 3896 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3897 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS); 3898 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS); 3899 break; 3900 case ISD::SSUBO: 3901 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32); 3902 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3903 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3904 break; 3905 case ISD::USUBO: 3906 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32); 3907 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS); 3908 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS); 3909 break; 3910 } // switch (...) 3911 3912 return std::make_pair(Value, OverflowCmp); 3913 } 3914 3915 3916 SDValue 3917 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { 3918 // Let legalize expand this if it isn't a legal type yet. 3919 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType())) 3920 return SDValue(); 3921 3922 SDValue Value, OverflowCmp; 3923 SDValue ARMcc; 3924 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc); 3925 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3926 SDLoc dl(Op); 3927 // We use 0 and 1 as false and true values. 
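  // For example (illustrative): for i32 llvm.uadd.with.overflow(a, b),
  // getARMXALUOOp produced Value = (add a, b) and OverflowCmp =
  // (ARMISD::CMP Value, a) with condition HS; the CMOV below materializes
  // the overflow bit as one of these two constants.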
3928 SDValue TVal = DAG.getConstant(1, dl, MVT::i32); 3929 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); 3930 EVT VT = Op.getValueType(); 3931 3932 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal, 3933 ARMcc, CCR, OverflowCmp); 3934 3935 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); 3936 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow); 3937 } 3938 3939 3940 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3941 SDValue Cond = Op.getOperand(0); 3942 SDValue SelectTrue = Op.getOperand(1); 3943 SDValue SelectFalse = Op.getOperand(2); 3944 SDLoc dl(Op); 3945 unsigned Opc = Cond.getOpcode(); 3946 3947 if (Cond.getResNo() == 1 && 3948 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 3949 Opc == ISD::USUBO)) { 3950 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0))) 3951 return SDValue(); 3952 3953 SDValue Value, OverflowCmp; 3954 SDValue ARMcc; 3955 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc); 3956 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3957 EVT VT = Op.getValueType(); 3958 3959 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR, 3960 OverflowCmp, DAG); 3961 } 3962 3963 // Convert: 3964 // 3965 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 3966 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 3967 // 3968 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 3969 const ConstantSDNode *CMOVTrue = 3970 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 3971 const ConstantSDNode *CMOVFalse = 3972 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 3973 3974 if (CMOVTrue && CMOVFalse) { 3975 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 3976 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 3977 3978 SDValue True; 3979 SDValue False; 3980 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 3981 True = SelectTrue; 3982 False = SelectFalse; 3983 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 3984 True = SelectFalse; 3985 False = SelectTrue; 3986 } 3987 3988 if (True.getNode() && False.getNode()) { 3989 EVT VT = Op.getValueType(); 3990 SDValue ARMcc = Cond.getOperand(2); 3991 SDValue CCR = Cond.getOperand(3); 3992 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 3993 assert(True.getValueType() == VT); 3994 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG); 3995 } 3996 } 3997 } 3998 3999 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 4000 // undefined bits before doing a full-word comparison with zero. 4001 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 4002 DAG.getConstant(1, dl, Cond.getValueType())); 4003 4004 return DAG.getSelectCC(dl, Cond, 4005 DAG.getConstant(0, dl, Cond.getValueType()), 4006 SelectTrue, SelectFalse, ISD::SETNE); 4007 } 4008 4009 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 4010 bool &swpCmpOps, bool &swpVselOps) { 4011 // Start by selecting the GE condition code for opcodes that return true for 4012 // 'equality' 4013 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || 4014 CC == ISD::SETULE) 4015 CondCode = ARMCC::GE; 4016 4017 // and GT for opcodes that return false for 'equality'. 4018 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || 4019 CC == ISD::SETULT) 4020 CondCode = ARMCC::GT; 4021 4022 // Since we are constrained to GE/GT, if the opcode contains 'less', we need 4023 // to swap the compare operands. 
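  // For example (illustrative): SETOLT a, b is handled as a GT test with the
  // compare operands swapped, i.e. "b > a", which holds exactly when a < b.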
4024 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || 4025 CC == ISD::SETULT) 4026 swpCmpOps = true; 4027 4028 // Both GT and GE are ordered comparisons, and return false for 'unordered'. 4029 // If we have an unordered opcode, we need to swap the operands to the VSEL 4030 // instruction (effectively negating the condition). 4031 // 4032 // This also has the effect of swapping which one of 'less' or 'greater' 4033 // returns true, so we also swap the compare operands. It also switches 4034 // whether we return true for 'equality', so we compensate by picking the 4035 // opposite condition code to our original choice. 4036 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || 4037 CC == ISD::SETUGT) { 4038 swpCmpOps = !swpCmpOps; 4039 swpVselOps = !swpVselOps; 4040 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; 4041 } 4042 4043 // 'ordered' is 'anything but unordered', so use the VS condition code and 4044 // swap the VSEL operands. 4045 if (CC == ISD::SETO) { 4046 CondCode = ARMCC::VS; 4047 swpVselOps = true; 4048 } 4049 4050 // 'unordered or not equal' is 'anything but equal', so use the EQ condition 4051 // code and swap the VSEL operands. 4052 if (CC == ISD::SETUNE) { 4053 CondCode = ARMCC::EQ; 4054 swpVselOps = true; 4055 } 4056 } 4057 4058 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, 4059 SDValue TrueVal, SDValue ARMcc, SDValue CCR, 4060 SDValue Cmp, SelectionDAG &DAG) const { 4061 if (Subtarget->isFPOnlySP() && VT == MVT::f64) { 4062 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, 4063 DAG.getVTList(MVT::i32, MVT::i32), FalseVal); 4064 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, 4065 DAG.getVTList(MVT::i32, MVT::i32), TrueVal); 4066 4067 SDValue TrueLow = TrueVal.getValue(0); 4068 SDValue TrueHigh = TrueVal.getValue(1); 4069 SDValue FalseLow = FalseVal.getValue(0); 4070 SDValue FalseHigh = FalseVal.getValue(1); 4071 4072 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow, 4073 ARMcc, CCR, Cmp); 4074 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh, 4075 ARMcc, CCR, duplicateCmp(Cmp, DAG)); 4076 4077 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High); 4078 } else { 4079 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, 4080 Cmp); 4081 } 4082 } 4083 4084 static bool isGTorGE(ISD::CondCode CC) { 4085 return CC == ISD::SETGT || CC == ISD::SETGE; 4086 } 4087 4088 static bool isLTorLE(ISD::CondCode CC) { 4089 return CC == ISD::SETLT || CC == ISD::SETLE; 4090 } 4091 4092 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating. 4093 // All of these conditions (and their <= and >= counterparts) will do: 4094 // x < k ? k : x 4095 // x > k ? x : k 4096 // k < x ? x : k 4097 // k > x ? k : x 4098 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, 4099 const SDValue TrueVal, const SDValue FalseVal, 4100 const ISD::CondCode CC, const SDValue K) { 4101 return (isGTorGE(CC) && 4102 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) || 4103 (isLTorLE(CC) && 4104 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))); 4105 } 4106 4107 // Similar to isLowerSaturate(), but checks for upper-saturating conditions. 
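// All of these conditions (and their <= and >= counterparts) will do:
//   x > k ? k : x
//   x < k ? x : k
//   k > x ? x : k
//   k < x ? k : x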
4108 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, 4109 const SDValue TrueVal, const SDValue FalseVal, 4110 const ISD::CondCode CC, const SDValue K) { 4111 return (isGTorGE(CC) && 4112 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) || 4113 (isLTorLE(CC) && 4114 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))); 4115 } 4116 4117 // Check if two chained conditionals could be converted into SSAT. 4118 // 4119 // SSAT can replace a set of two conditional selectors that bound a number to an 4120 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples: 4121 // 4122 // x < -k ? -k : (x > k ? k : x) 4123 // x < -k ? -k : (x < k ? x : k) 4124 // x > -k ? (x > k ? k : x) : -k 4125 // x < k ? (x < -k ? -k : x) : k 4126 // etc. 4127 // 4128 // It returns true if the conversion can be done, false otherwise. 4129 // Additionally, the variable is returned in parameter V and the constant in K. 4130 static bool isSaturatingConditional(const SDValue &Op, SDValue &V, 4131 uint64_t &K) { 4132 4133 SDValue LHS1 = Op.getOperand(0); 4134 SDValue RHS1 = Op.getOperand(1); 4135 SDValue TrueVal1 = Op.getOperand(2); 4136 SDValue FalseVal1 = Op.getOperand(3); 4137 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4138 4139 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1; 4140 if (Op2.getOpcode() != ISD::SELECT_CC) 4141 return false; 4142 4143 SDValue LHS2 = Op2.getOperand(0); 4144 SDValue RHS2 = Op2.getOperand(1); 4145 SDValue TrueVal2 = Op2.getOperand(2); 4146 SDValue FalseVal2 = Op2.getOperand(3); 4147 ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get(); 4148 4149 // Find out which are the constants and which are the variables 4150 // in each conditional 4151 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1) 4152 ? &RHS1 4153 : NULL; 4154 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2) 4155 ? &RHS2 4156 : NULL; 4157 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2; 4158 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1; 4159 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2; 4160 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2; 4161 4162 // We must detect cases where the original operations worked with 16- or 4163 // 8-bit values. In such case, V2Tmp != V2 because the comparison operations 4164 // must work with sign-extended values but the select operations return 4165 // the original non-extended value. 4166 SDValue V2TmpReg = V2Tmp; 4167 if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG) 4168 V2TmpReg = V2Tmp->getOperand(0); 4169 4170 // Check that the registers and the constants have the correct values 4171 // in both conditionals 4172 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp || 4173 V2TmpReg != V2) 4174 return false; 4175 4176 // Figure out which conditional is saturating the lower/upper bound. 4177 const SDValue *LowerCheckOp = 4178 isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) 4179 ? &Op 4180 : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2 4181 : NULL; 4182 const SDValue *UpperCheckOp = 4183 isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1) 4184 ? &Op 4185 : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? 
&Op2 4186 : NULL; 4187 4188 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp) 4189 return false; 4190 4191 // Check that the constant in the lower-bound check is 4192 // the opposite of the constant in the upper-bound check 4193 // in 1's complement. 4194 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue(); 4195 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue(); 4196 int64_t PosVal = std::max(Val1, Val2); 4197 4198 if (((Val1 > Val2 && UpperCheckOp == &Op) || 4199 (Val1 < Val2 && UpperCheckOp == &Op2)) && 4200 Val1 == ~Val2 && isPowerOf2_64(PosVal + 1)) { 4201 4202 V = V2; 4203 K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive 4204 return true; 4205 } 4206 4207 return false; 4208 } 4209 4210 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 4211 4212 EVT VT = Op.getValueType(); 4213 SDLoc dl(Op); 4214 4215 // Try to convert two saturating conditional selects into a single SSAT 4216 SDValue SatValue; 4217 uint64_t SatConstant; 4218 if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) && 4219 isSaturatingConditional(Op, SatValue, SatConstant)) 4220 return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue, 4221 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT)); 4222 4223 SDValue LHS = Op.getOperand(0); 4224 SDValue RHS = Op.getOperand(1); 4225 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 4226 SDValue TrueVal = Op.getOperand(2); 4227 SDValue FalseVal = Op.getOperand(3); 4228 4229 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { 4230 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, 4231 dl); 4232 4233 // If softenSetCCOperands only returned one value, we should compare it to 4234 // zero. 4235 if (!RHS.getNode()) { 4236 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 4237 CC = ISD::SETNE; 4238 } 4239 } 4240 4241 if (LHS.getValueType() == MVT::i32) { 4242 // Try to generate VSEL on ARMv8. 4243 // The VSEL instruction can't use all the usual ARM condition 4244 // codes: it only has two bits to select the condition code, so it's 4245 // constrained to use only GE, GT, VS and EQ. 4246 // 4247 // To implement all the various ISD::SETXXX opcodes, we sometimes need to 4248 // swap the operands of the previous compare instruction (effectively 4249 // inverting the compare condition, swapping 'less' and 'greater') and 4250 // sometimes need to swap the operands to the VSEL (which inverts the 4251 // condition in the sense of firing whenever the previous condition didn't) 4252 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 4253 TrueVal.getValueType() == MVT::f64)) { 4254 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 4255 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || 4256 CondCode == ARMCC::VC || CondCode == ARMCC::NE) { 4257 CC = ISD::getSetCCInverse(CC, true); 4258 std::swap(TrueVal, FalseVal); 4259 } 4260 } 4261 4262 SDValue ARMcc; 4263 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4264 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 4265 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 4266 } 4267 4268 ARMCC::CondCodes CondCode, CondCode2; 4269 FPCCToARMCC(CC, CondCode, CondCode2); 4270 4271 // Try to generate VMAXNM/VMINNM on ARMv8. 
4272 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 4273 TrueVal.getValueType() == MVT::f64)) { 4274 bool swpCmpOps = false; 4275 bool swpVselOps = false; 4276 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); 4277 4278 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || 4279 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { 4280 if (swpCmpOps) 4281 std::swap(LHS, RHS); 4282 if (swpVselOps) 4283 std::swap(TrueVal, FalseVal); 4284 } 4285 } 4286 4287 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 4288 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 4289 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4290 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); 4291 if (CondCode2 != ARMCC::AL) { 4292 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); 4293 // FIXME: Needs another CMP because flag can have but one use. 4294 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 4295 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); 4296 } 4297 return Result; 4298 } 4299 4300 /// canChangeToInt - Given the fp compare operand, return true if it is suitable 4301 /// to morph to an integer compare sequence. 4302 static bool canChangeToInt(SDValue Op, bool &SeenZero, 4303 const ARMSubtarget *Subtarget) { 4304 SDNode *N = Op.getNode(); 4305 if (!N->hasOneUse()) 4306 // Otherwise it requires moving the value from fp to integer registers. 4307 return false; 4308 if (!N->getNumValues()) 4309 return false; 4310 EVT VT = Op.getValueType(); 4311 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 4312 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 4313 // vmrs are very slow, e.g. cortex-a8. 4314 return false; 4315 4316 if (isFloatingPointZero(Op)) { 4317 SeenZero = true; 4318 return true; 4319 } 4320 return ISD::isNormalLoad(N); 4321 } 4322 4323 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 4324 if (isFloatingPointZero(Op)) 4325 return DAG.getConstant(0, SDLoc(Op), MVT::i32); 4326 4327 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 4328 return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), 4329 Ld->getPointerInfo(), Ld->getAlignment(), 4330 Ld->getMemOperand()->getFlags()); 4331 4332 llvm_unreachable("Unknown VFP cmp argument!"); 4333 } 4334 4335 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 4336 SDValue &RetVal1, SDValue &RetVal2) { 4337 SDLoc dl(Op); 4338 4339 if (isFloatingPointZero(Op)) { 4340 RetVal1 = DAG.getConstant(0, dl, MVT::i32); 4341 RetVal2 = DAG.getConstant(0, dl, MVT::i32); 4342 return; 4343 } 4344 4345 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 4346 SDValue Ptr = Ld->getBasePtr(); 4347 RetVal1 = 4348 DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), 4349 Ld->getAlignment(), Ld->getMemOperand()->getFlags()); 4350 4351 EVT PtrType = Ptr.getValueType(); 4352 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 4353 SDValue NewPtr = DAG.getNode(ISD::ADD, dl, 4354 PtrType, Ptr, DAG.getConstant(4, dl, PtrType)); 4355 RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr, 4356 Ld->getPointerInfo().getWithOffset(4), NewAlign, 4357 Ld->getMemOperand()->getFlags()); 4358 return; 4359 } 4360 4361 llvm_unreachable("Unknown VFP cmp argument!"); 4362 } 4363 4364 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 4365 /// f32 and even f64 comparisons to integer ones. 
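/// The transformation only fires for EQ/NE compares where at least one operand
/// is a floating-point zero and both operands are either zeros or single-use
/// plain loads, so the compare can be redone on the raw integer bits with the
/// sign bit masked off (which keeps +0.0 and -0.0 comparing equal).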
4366 SDValue 4367 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 4368 SDValue Chain = Op.getOperand(0); 4369 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 4370 SDValue LHS = Op.getOperand(2); 4371 SDValue RHS = Op.getOperand(3); 4372 SDValue Dest = Op.getOperand(4); 4373 SDLoc dl(Op); 4374 4375 bool LHSSeenZero = false; 4376 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 4377 bool RHSSeenZero = false; 4378 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 4379 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 4380 // If unsafe fp math optimization is enabled and there are no other uses of 4381 // the CMP operands, and the condition code is EQ or NE, we can optimize it 4382 // to an integer comparison. 4383 if (CC == ISD::SETOEQ) 4384 CC = ISD::SETEQ; 4385 else if (CC == ISD::SETUNE) 4386 CC = ISD::SETNE; 4387 4388 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32); 4389 SDValue ARMcc; 4390 if (LHS.getValueType() == MVT::f32) { 4391 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 4392 bitcastf32Toi32(LHS, DAG), Mask); 4393 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 4394 bitcastf32Toi32(RHS, DAG), Mask); 4395 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 4396 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4397 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 4398 Chain, Dest, ARMcc, CCR, Cmp); 4399 } 4400 4401 SDValue LHS1, LHS2; 4402 SDValue RHS1, RHS2; 4403 expandf64Toi32(LHS, DAG, LHS1, LHS2); 4404 expandf64Toi32(RHS, DAG, RHS1, RHS2); 4405 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 4406 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 4407 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 4408 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 4409 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 4410 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 4411 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops); 4412 } 4413 4414 return SDValue(); 4415 } 4416 4417 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 4418 SDValue Chain = Op.getOperand(0); 4419 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 4420 SDValue LHS = Op.getOperand(2); 4421 SDValue RHS = Op.getOperand(3); 4422 SDValue Dest = Op.getOperand(4); 4423 SDLoc dl(Op); 4424 4425 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { 4426 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, 4427 dl); 4428 4429 // If softenSetCCOperands only returned one value, we should compare it to 4430 // zero. 
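    // (Roughly: the f64 compare becomes a runtime comparison call -- e.g. one
    //  of the __aeabi_dcmp* helpers on EABI targets -- whose i32 result
    //  already encodes the predicate, so only LHS survives and is tested
    //  against zero.)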
4431 if (!RHS.getNode()) { 4432 RHS = DAG.getConstant(0, dl, LHS.getValueType()); 4433 CC = ISD::SETNE; 4434 } 4435 } 4436 4437 if (LHS.getValueType() == MVT::i32) { 4438 SDValue ARMcc; 4439 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 4440 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4441 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 4442 Chain, Dest, ARMcc, CCR, Cmp); 4443 } 4444 4445 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 4446 4447 if (getTargetMachine().Options.UnsafeFPMath && 4448 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 4449 CC == ISD::SETNE || CC == ISD::SETUNE)) { 4450 if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) 4451 return Result; 4452 } 4453 4454 ARMCC::CondCodes CondCode, CondCode2; 4455 FPCCToARMCC(CC, CondCode, CondCode2); 4456 4457 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); 4458 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 4459 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4460 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 4461 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 4462 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); 4463 if (CondCode2 != ARMCC::AL) { 4464 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32); 4465 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 4466 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops); 4467 } 4468 return Res; 4469 } 4470 4471 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 4472 SDValue Chain = Op.getOperand(0); 4473 SDValue Table = Op.getOperand(1); 4474 SDValue Index = Op.getOperand(2); 4475 SDLoc dl(Op); 4476 4477 EVT PTy = getPointerTy(DAG.getDataLayout()); 4478 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 4479 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 4480 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); 4481 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); 4482 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 4483 if (Subtarget->isThumb2()) { 4484 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 4485 // which does another jump to the destination. This also makes it easier 4486 // to translate it to TBB / TBH later. 4487 // FIXME: This might not work if the function is extremely large. 
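    // (TBB/TBH table entries are byte/halfword offsets that are doubled at
    //  runtime, so branch targets can only lie within roughly 512 bytes (TBB)
    //  or 128 KiB (TBH) forward of the branch -- hence the FIXME above.)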
4488 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 4489 Addr, Op.getOperand(2), JTI); 4490 } 4491 if (isPositionIndependent() || Subtarget->isROPI()) { 4492 Addr = 4493 DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 4494 MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); 4495 Chain = Addr.getValue(1); 4496 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 4497 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); 4498 } else { 4499 Addr = 4500 DAG.getLoad(PTy, dl, Chain, Addr, 4501 MachinePointerInfo::getJumpTable(DAG.getMachineFunction())); 4502 Chain = Addr.getValue(1); 4503 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI); 4504 } 4505 } 4506 4507 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 4508 EVT VT = Op.getValueType(); 4509 SDLoc dl(Op); 4510 4511 if (Op.getValueType().getVectorElementType() == MVT::i32) { 4512 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 4513 return Op; 4514 return DAG.UnrollVectorOp(Op.getNode()); 4515 } 4516 4517 assert(Op.getOperand(0).getValueType() == MVT::v4f32 && 4518 "Invalid type for custom lowering!"); 4519 if (VT != MVT::v4i16) 4520 return DAG.UnrollVectorOp(Op.getNode()); 4521 4522 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); 4523 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 4524 } 4525 4526 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { 4527 EVT VT = Op.getValueType(); 4528 if (VT.isVector()) 4529 return LowerVectorFP_TO_INT(Op, DAG); 4530 if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) { 4531 RTLIB::Libcall LC; 4532 if (Op.getOpcode() == ISD::FP_TO_SINT) 4533 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), 4534 Op.getValueType()); 4535 else 4536 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), 4537 Op.getValueType()); 4538 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0), 4539 /*isSigned*/ false, SDLoc(Op)).first; 4540 } 4541 4542 return Op; 4543 } 4544 4545 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 4546 EVT VT = Op.getValueType(); 4547 SDLoc dl(Op); 4548 4549 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 4550 if (VT.getVectorElementType() == MVT::f32) 4551 return Op; 4552 return DAG.UnrollVectorOp(Op.getNode()); 4553 } 4554 4555 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 4556 "Invalid type for custom lowering!"); 4557 if (VT != MVT::v4f32) 4558 return DAG.UnrollVectorOp(Op.getNode()); 4559 4560 unsigned CastOpc; 4561 unsigned Opc; 4562 switch (Op.getOpcode()) { 4563 default: llvm_unreachable("Invalid opcode!"); 4564 case ISD::SINT_TO_FP: 4565 CastOpc = ISD::SIGN_EXTEND; 4566 Opc = ISD::SINT_TO_FP; 4567 break; 4568 case ISD::UINT_TO_FP: 4569 CastOpc = ISD::ZERO_EXTEND; 4570 Opc = ISD::UINT_TO_FP; 4571 break; 4572 } 4573 4574 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 4575 return DAG.getNode(Opc, dl, VT, Op); 4576 } 4577 4578 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { 4579 EVT VT = Op.getValueType(); 4580 if (VT.isVector()) 4581 return LowerVectorINT_TO_FP(Op, DAG); 4582 if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) { 4583 RTLIB::Libcall LC; 4584 if (Op.getOpcode() == ISD::SINT_TO_FP) 4585 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), 4586 Op.getValueType()); 4587 else 4588 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), 4589 Op.getValueType()); 4590 return makeLibCall(DAG, 
LC, Op.getValueType(), Op.getOperand(0), 4591 /*isSigned*/ false, SDLoc(Op)).first; 4592 } 4593 4594 return Op; 4595 } 4596 4597 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 4598 // Implement fcopysign with a fabs and a conditional fneg. 4599 SDValue Tmp0 = Op.getOperand(0); 4600 SDValue Tmp1 = Op.getOperand(1); 4601 SDLoc dl(Op); 4602 EVT VT = Op.getValueType(); 4603 EVT SrcVT = Tmp1.getValueType(); 4604 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 4605 Tmp0.getOpcode() == ARMISD::VMOVDRR; 4606 bool UseNEON = !InGPR && Subtarget->hasNEON(); 4607 4608 if (UseNEON) { 4609 // Use VBSL to copy the sign bit. 4610 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 4611 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 4612 DAG.getTargetConstant(EncodedVal, dl, MVT::i32)); 4613 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 4614 if (VT == MVT::f64) 4615 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 4616 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 4617 DAG.getConstant(32, dl, MVT::i32)); 4618 else /*if (VT == MVT::f32)*/ 4619 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 4620 if (SrcVT == MVT::f32) { 4621 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 4622 if (VT == MVT::f64) 4623 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 4624 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 4625 DAG.getConstant(32, dl, MVT::i32)); 4626 } else if (VT == MVT::f32) 4627 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 4628 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 4629 DAG.getConstant(32, dl, MVT::i32)); 4630 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 4631 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 4632 4633 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 4634 dl, MVT::i32); 4635 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 4636 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 4637 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 4638 4639 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 4640 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 4641 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 4642 if (VT == MVT::f32) { 4643 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 4644 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 4645 DAG.getConstant(0, dl, MVT::i32)); 4646 } else { 4647 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 4648 } 4649 4650 return Res; 4651 } 4652 4653 // Bitcast operand 1 to i32. 4654 if (SrcVT == MVT::f64) 4655 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 4656 Tmp1).getValue(1); 4657 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 4658 4659 // Or in the signbit with integer operations. 4660 SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32); 4661 SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32); 4662 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 4663 if (VT == MVT::f32) { 4664 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 4665 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 4666 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 4667 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 4668 } 4669 4670 // f64: Or the high part with signbit and then combine two parts. 
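  // Sketch of the sequence built below:
  //   result = VMOVDRR(lo(Tmp0), (hi(Tmp0) & 0x7fffffff) | signbit(Tmp1))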
4671 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 4672 Tmp0); 4673 SDValue Lo = Tmp0.getValue(0); 4674 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 4675 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 4676 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 4677 } 4678 4679 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 4680 MachineFunction &MF = DAG.getMachineFunction(); 4681 MachineFrameInfo &MFI = MF.getFrameInfo(); 4682 MFI.setReturnAddressIsTaken(true); 4683 4684 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 4685 return SDValue(); 4686 4687 EVT VT = Op.getValueType(); 4688 SDLoc dl(Op); 4689 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4690 if (Depth) { 4691 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 4692 SDValue Offset = DAG.getConstant(4, dl, MVT::i32); 4693 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 4694 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 4695 MachinePointerInfo()); 4696 } 4697 4698 // Return LR, which contains the return address. Mark it an implicit live-in. 4699 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 4700 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 4701 } 4702 4703 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 4704 const ARMBaseRegisterInfo &ARI = 4705 *static_cast<const ARMBaseRegisterInfo*>(RegInfo); 4706 MachineFunction &MF = DAG.getMachineFunction(); 4707 MachineFrameInfo &MFI = MF.getFrameInfo(); 4708 MFI.setFrameAddressIsTaken(true); 4709 4710 EVT VT = Op.getValueType(); 4711 SDLoc dl(Op); // FIXME probably not meaningful 4712 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4713 unsigned FrameReg = ARI.getFrameRegister(MF); 4714 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 4715 while (Depth--) 4716 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 4717 MachinePointerInfo()); 4718 return FrameAddr; 4719 } 4720 4721 // FIXME? Maybe this could be a TableGen attribute on some registers and 4722 // this table could be generated automatically from RegInfo. 4723 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT, 4724 SelectionDAG &DAG) const { 4725 unsigned Reg = StringSwitch<unsigned>(RegName) 4726 .Case("sp", ARM::SP) 4727 .Default(0); 4728 if (Reg) 4729 return Reg; 4730 report_fatal_error(Twine("Invalid register name \"" 4731 + StringRef(RegName) + "\".")); 4732 } 4733 4734 // Result is 64 bit value so split into two 32 bit values and return as a 4735 // pair of values. 4736 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results, 4737 SelectionDAG &DAG) { 4738 SDLoc DL(N); 4739 4740 // This function is only supposed to be called for i64 type destination. 4741 assert(N->getValueType(0) == MVT::i64 4742 && "ExpandREAD_REGISTER called for non-i64 type result."); 4743 4744 SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL, 4745 DAG.getVTList(MVT::i32, MVT::i32, MVT::Other), 4746 N->getOperand(0), 4747 N->getOperand(1)); 4748 4749 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0), 4750 Read.getValue(1))); 4751 Results.push_back(Read.getOperand(0)); 4752 } 4753 4754 /// \p BC is a bitcast that is about to be turned into a VMOVDRR. 
4755 /// When \p DstVT, the destination type of \p BC, is on the vector 4756 /// register bank and the source of bitcast, \p Op, operates on the same bank, 4757 /// it might be possible to combine them, such that everything stays on the 4758 /// vector register bank. 4759 /// \p return The node that would replace \p BT, if the combine 4760 /// is possible. 4761 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, 4762 SelectionDAG &DAG) { 4763 SDValue Op = BC->getOperand(0); 4764 EVT DstVT = BC->getValueType(0); 4765 4766 // The only vector instruction that can produce a scalar (remember, 4767 // since the bitcast was about to be turned into VMOVDRR, the source 4768 // type is i64) from a vector is EXTRACT_VECTOR_ELT. 4769 // Moreover, we can do this combine only if there is one use. 4770 // Finally, if the destination type is not a vector, there is not 4771 // much point on forcing everything on the vector bank. 4772 if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 4773 !Op.hasOneUse()) 4774 return SDValue(); 4775 4776 // If the index is not constant, we will introduce an additional 4777 // multiply that will stick. 4778 // Give up in that case. 4779 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 4780 if (!Index) 4781 return SDValue(); 4782 unsigned DstNumElt = DstVT.getVectorNumElements(); 4783 4784 // Compute the new index. 4785 const APInt &APIntIndex = Index->getAPIntValue(); 4786 APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt); 4787 NewIndex *= APIntIndex; 4788 // Check if the new constant index fits into i32. 4789 if (NewIndex.getBitWidth() > 32) 4790 return SDValue(); 4791 4792 // vMTy bitcast(i64 extractelt vNi64 src, i32 index) -> 4793 // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M) 4794 SDLoc dl(Op); 4795 SDValue ExtractSrc = Op.getOperand(0); 4796 EVT VecVT = EVT::getVectorVT( 4797 *DAG.getContext(), DstVT.getScalarType(), 4798 ExtractSrc.getValueType().getVectorNumElements() * DstNumElt); 4799 SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc); 4800 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast, 4801 DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32)); 4802 } 4803 4804 /// ExpandBITCAST - If the target supports VFP, this function is called to 4805 /// expand a bit convert where either the source or destination type is i64 to 4806 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 4807 /// operand type is illegal (e.g., v2f32 for a target that doesn't support 4808 /// vectors), since the legalizer won't know what to do with that. 4809 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 4810 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4811 SDLoc dl(N); 4812 SDValue Op = N->getOperand(0); 4813 4814 // This function is only supposed to be called for i64 types, either as the 4815 // source or destination of the bit convert. 4816 EVT SrcVT = Op.getValueType(); 4817 EVT DstVT = N->getValueType(0); 4818 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 4819 "ExpandBITCAST called for non-i64 type"); 4820 4821 // Turn i64->f64 into VMOVDRR. 4822 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 4823 // Do not force values to GPRs (this is what VMOVDRR does for the inputs) 4824 // if we can combine the bitcast with its source. 
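    // (That combine keeps an 'i64 extractelt' feeding this bitcast entirely on
    //  the vector bank as an EXTRACT_SUBVECTOR; see
    //  CombineVMOVDRRCandidateWithVecOp above.)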
4825 if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG)) 4826 return Val; 4827 4828 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 4829 DAG.getConstant(0, dl, MVT::i32)); 4830 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 4831 DAG.getConstant(1, dl, MVT::i32)); 4832 return DAG.getNode(ISD::BITCAST, dl, DstVT, 4833 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 4834 } 4835 4836 // Turn f64->i64 into VMOVRRD. 4837 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 4838 SDValue Cvt; 4839 if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() && 4840 SrcVT.getVectorNumElements() > 1) 4841 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 4842 DAG.getVTList(MVT::i32, MVT::i32), 4843 DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op)); 4844 else 4845 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 4846 DAG.getVTList(MVT::i32, MVT::i32), Op); 4847 // Merge the pieces into a single i64 value. 4848 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 4849 } 4850 4851 return SDValue(); 4852 } 4853 4854 /// getZeroVector - Returns a vector of specified type with all zero elements. 4855 /// Zero vectors are used to represent vector negation and in those cases 4856 /// will be implemented with the NEON VNEG instruction. However, VNEG does 4857 /// not support i64 elements, so sometimes the zero vectors will need to be 4858 /// explicitly constructed. Regardless, use a canonical VMOV to create the 4859 /// zero vector. 4860 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { 4861 assert(VT.isVector() && "Expected a vector type"); 4862 // The canonical modified immediate encoding of a zero vector is....0! 4863 SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32); 4864 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 4865 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 4866 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4867 } 4868 4869 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two 4870 /// i32 values and take a 2 x i32 value to shift plus a shift amount. 4871 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 4872 SelectionDAG &DAG) const { 4873 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4874 EVT VT = Op.getValueType(); 4875 unsigned VTBits = VT.getSizeInBits(); 4876 SDLoc dl(Op); 4877 SDValue ShOpLo = Op.getOperand(0); 4878 SDValue ShOpHi = Op.getOperand(1); 4879 SDValue ShAmt = Op.getOperand(2); 4880 SDValue ARMcc; 4881 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? 
ISD::SRA : ISD::SRL; 4882 4883 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 4884 4885 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 4886 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); 4887 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 4888 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 4889 DAG.getConstant(VTBits, dl, MVT::i32)); 4890 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 4891 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 4892 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 4893 4894 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4895 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), 4896 ISD::SETGE, ARMcc, DAG, dl); 4897 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 4898 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 4899 CCR, Cmp); 4900 4901 SDValue Ops[2] = { Lo, Hi }; 4902 return DAG.getMergeValues(Ops, dl); 4903 } 4904 4905 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 4906 /// i32 values and take a 2 x i32 value to shift plus a shift amount. 4907 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 4908 SelectionDAG &DAG) const { 4909 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 4910 EVT VT = Op.getValueType(); 4911 unsigned VTBits = VT.getSizeInBits(); 4912 SDLoc dl(Op); 4913 SDValue ShOpLo = Op.getOperand(0); 4914 SDValue ShOpHi = Op.getOperand(1); 4915 SDValue ShAmt = Op.getOperand(2); 4916 SDValue ARMcc; 4917 4918 assert(Op.getOpcode() == ISD::SHL_PARTS); 4919 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 4920 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt); 4921 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 4922 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 4923 DAG.getConstant(VTBits, dl, MVT::i32)); 4924 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 4925 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 4926 4927 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 4928 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 4929 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32), 4930 ISD::SETGE, ARMcc, DAG, dl); 4931 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 4932 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 4933 CCR, Cmp); 4934 4935 SDValue Ops[2] = { Lo, Hi }; 4936 return DAG.getMergeValues(Ops, dl); 4937 } 4938 4939 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 4940 SelectionDAG &DAG) const { 4941 // The rounding mode is in bits 23:22 of the FPSCR. 4942 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 4943 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 4944 // so that the shift + and get folded into a bitfield extract. 
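  // (Grouping note: the addend is the constant 1 << 22, i.e. the computation
  //  is ((FPSCR + (1 << 22)) >> 22) & 3, matching the getConstant(1U << 22)
  //  below.)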
4945 SDLoc dl(Op); 4946 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 4947 DAG.getConstant(Intrinsic::arm_get_fpscr, dl, 4948 MVT::i32)); 4949 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 4950 DAG.getConstant(1U << 22, dl, MVT::i32)); 4951 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 4952 DAG.getConstant(22, dl, MVT::i32)); 4953 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 4954 DAG.getConstant(3, dl, MVT::i32)); 4955 } 4956 4957 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 4958 const ARMSubtarget *ST) { 4959 SDLoc dl(N); 4960 EVT VT = N->getValueType(0); 4961 if (VT.isVector()) { 4962 assert(ST->hasNEON()); 4963 4964 // Compute the least significant set bit: LSB = X & -X 4965 SDValue X = N->getOperand(0); 4966 SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X); 4967 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX); 4968 4969 EVT ElemTy = VT.getVectorElementType(); 4970 4971 if (ElemTy == MVT::i8) { 4972 // Compute with: cttz(x) = ctpop(lsb - 1) 4973 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 4974 DAG.getTargetConstant(1, dl, ElemTy)); 4975 SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); 4976 return DAG.getNode(ISD::CTPOP, dl, VT, Bits); 4977 } 4978 4979 if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) && 4980 (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) { 4981 // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0 4982 unsigned NumBits = ElemTy.getSizeInBits(); 4983 SDValue WidthMinus1 = 4984 DAG.getNode(ARMISD::VMOVIMM, dl, VT, 4985 DAG.getTargetConstant(NumBits - 1, dl, ElemTy)); 4986 SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB); 4987 return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ); 4988 } 4989 4990 // Compute with: cttz(x) = ctpop(lsb - 1) 4991 4992 // Since we can only compute the number of bits in a byte with vcnt.8, we 4993 // have to gather the result with pairwise addition (vpaddl) for i16, i32, 4994 // and i64. 4995 4996 // Compute LSB - 1. 4997 SDValue Bits; 4998 if (ElemTy == MVT::i64) { 4999 // Load constant 0xffff'ffff'ffff'ffff to register. 5000 SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 5001 DAG.getTargetConstant(0x1eff, dl, MVT::i32)); 5002 Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF); 5003 } else { 5004 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT, 5005 DAG.getTargetConstant(1, dl, ElemTy)); 5006 Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One); 5007 } 5008 5009 // Count #bits with vcnt.8. 5010 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; 5011 SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits); 5012 SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8); 5013 5014 // Gather the #bits with vpaddl (pairwise add.) 5015 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; 5016 SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit, 5017 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), 5018 Cnt8); 5019 if (ElemTy == MVT::i16) 5020 return Cnt16; 5021 5022 EVT VT32Bit = VT.is64BitVector() ? 
MVT::v2i32 : MVT::v4i32; 5023 SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit, 5024 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), 5025 Cnt16); 5026 if (ElemTy == MVT::i32) 5027 return Cnt32; 5028 5029 assert(ElemTy == MVT::i64); 5030 SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 5031 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32), 5032 Cnt32); 5033 return Cnt64; 5034 } 5035 5036 if (!ST->hasV6T2Ops()) 5037 return SDValue(); 5038 5039 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0)); 5040 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 5041 } 5042 5043 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count 5044 /// for each 16-bit element from operand, repeated. The basic idea is to 5045 /// leverage vcnt to get the 8-bit counts, gather and add the results. 5046 /// 5047 /// Trace for v4i16: 5048 /// input = [v0 v1 v2 v3 ] (vi 16-bit element) 5049 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element) 5050 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi) 5051 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6] 5052 /// [b0 b1 b2 b3 b4 b5 b6 b7] 5053 /// +[b1 b0 b3 b2 b5 b4 b7 b6] 5054 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0, 5055 /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits) 5056 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) { 5057 EVT VT = N->getValueType(0); 5058 SDLoc DL(N); 5059 5060 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8; 5061 SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0)); 5062 SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0); 5063 SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1); 5064 SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2); 5065 return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3); 5066 } 5067 5068 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the 5069 /// bit-count for each 16-bit element from the operand. We need slightly 5070 /// different sequencing for v4i16 and v8i16 to stay within NEON's available 5071 /// 64/128-bit registers. 5072 /// 5073 /// Trace for v4i16: 5074 /// input = [v0 v1 v2 v3 ] (vi 16-bit element) 5075 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi) 5076 /// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ] 5077 /// v4i16:Extracted = [k0 k1 k2 k3 ] 5078 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { 5079 EVT VT = N->getValueType(0); 5080 SDLoc DL(N); 5081 5082 SDValue BitCounts = getCTPOP16BitCounts(N, DAG); 5083 if (VT.is64BitVector()) { 5084 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts); 5085 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended, 5086 DAG.getIntPtrConstant(0, DL)); 5087 } else { 5088 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, 5089 BitCounts, DAG.getIntPtrConstant(0, DL)); 5090 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted); 5091 } 5092 } 5093 5094 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the 5095 /// bit-count for each 32-bit element from the operand. The idea here is 5096 /// to split the vector into 16-bit elements, leverage the 16-bit count 5097 /// routine, and then combine the results. 
5098 /// 5099 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged): 5100 /// input = [v0 v1 ] (vi: 32-bit elements) 5101 /// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1]) 5102 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi) 5103 /// vrev: N0 = [k1 k0 k3 k2 ] 5104 /// [k0 k1 k2 k3 ] 5105 /// N1 =+[k1 k0 k3 k2 ] 5106 /// [k0 k2 k1 k3 ] 5107 /// N2 =+[k1 k3 k0 k2 ] 5108 /// [k0 k2 k1 k3 ] 5109 /// Extended =+[k1 k3 k0 k2 ] 5110 /// [k0 k2 ] 5111 /// Extracted=+[k1 k3 ] 5112 /// 5113 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) { 5114 EVT VT = N->getValueType(0); 5115 SDLoc DL(N); 5116 5117 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; 5118 5119 SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0)); 5120 SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG); 5121 SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16); 5122 SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0); 5123 SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1); 5124 5125 if (VT.is64BitVector()) { 5126 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2); 5127 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended, 5128 DAG.getIntPtrConstant(0, DL)); 5129 } else { 5130 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2, 5131 DAG.getIntPtrConstant(0, DL)); 5132 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted); 5133 } 5134 } 5135 5136 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, 5137 const ARMSubtarget *ST) { 5138 EVT VT = N->getValueType(0); 5139 5140 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); 5141 assert((VT == MVT::v2i32 || VT == MVT::v4i32 || 5142 VT == MVT::v4i16 || VT == MVT::v8i16) && 5143 "Unexpected type for custom ctpop lowering"); 5144 5145 if (VT.getVectorElementType() == MVT::i32) 5146 return lowerCTPOP32BitElements(N, DAG); 5147 else 5148 return lowerCTPOP16BitElements(N, DAG); 5149 } 5150 5151 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 5152 const ARMSubtarget *ST) { 5153 EVT VT = N->getValueType(0); 5154 SDLoc dl(N); 5155 5156 if (!VT.isVector()) 5157 return SDValue(); 5158 5159 // Lower vector shifts on NEON to use VSHL. 5160 assert(ST->hasNEON() && "unexpected vector shift"); 5161 5162 // Left shifts translate directly to the vshiftu intrinsic. 5163 if (N->getOpcode() == ISD::SHL) 5164 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 5165 DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl, 5166 MVT::i32), 5167 N->getOperand(0), N->getOperand(1)); 5168 5169 assert((N->getOpcode() == ISD::SRA || 5170 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 5171 5172 // NEON uses the same intrinsics for both left and right shifts. For 5173 // right shifts, the shift amounts are negative, so negate the vector of 5174 // shift amounts. 5175 EVT ShiftVT = N->getOperand(1).getValueType(); 5176 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 5177 getZeroVector(ShiftVT, DAG, dl), 5178 N->getOperand(1)); 5179 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 
5180 Intrinsic::arm_neon_vshifts : 5181 Intrinsic::arm_neon_vshiftu); 5182 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 5183 DAG.getConstant(vshiftInt, dl, MVT::i32), 5184 N->getOperand(0), NegatedCount); 5185 } 5186 5187 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 5188 const ARMSubtarget *ST) { 5189 EVT VT = N->getValueType(0); 5190 SDLoc dl(N); 5191 5192 // We can get here for a node like i32 = ISD::SHL i32, i64 5193 if (VT != MVT::i64) 5194 return SDValue(); 5195 5196 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 5197 "Unknown shift to lower!"); 5198 5199 // We only lower SRA, SRL of 1 here, all others use generic lowering. 5200 if (!isOneConstant(N->getOperand(1))) 5201 return SDValue(); 5202 5203 // If we are in thumb mode, we don't have RRX. 5204 if (ST->isThumb1Only()) return SDValue(); 5205 5206 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 5207 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 5208 DAG.getConstant(0, dl, MVT::i32)); 5209 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 5210 DAG.getConstant(1, dl, MVT::i32)); 5211 5212 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 5213 // captures the result into a carry flag. 5214 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 5215 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi); 5216 5217 // The low part is an ARMISD::RRX operand, which shifts the carry in. 5218 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 5219 5220 // Merge the pieces into a single i64 value. 5221 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5222 } 5223 5224 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 5225 SDValue TmpOp0, TmpOp1; 5226 bool Invert = false; 5227 bool Swap = false; 5228 unsigned Opc = 0; 5229 5230 SDValue Op0 = Op.getOperand(0); 5231 SDValue Op1 = Op.getOperand(1); 5232 SDValue CC = Op.getOperand(2); 5233 EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger(); 5234 EVT VT = Op.getValueType(); 5235 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 5236 SDLoc dl(Op); 5237 5238 if (CmpVT.getVectorElementType() == MVT::i64) 5239 // 64-bit comparisons are not legal. We've marked SETCC as non-Custom, 5240 // but it's possible that our operands are 64-bit but our result is 32-bit. 5241 // Bail in this case. 5242 return SDValue(); 5243 5244 if (Op1.getValueType().isFloatingPoint()) { 5245 switch (SetCCOpcode) { 5246 default: llvm_unreachable("Illegal FP comparison"); 5247 case ISD::SETUNE: 5248 case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH; 5249 case ISD::SETOEQ: 5250 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 5251 case ISD::SETOLT: 5252 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH; 5253 case ISD::SETOGT: 5254 case ISD::SETGT: Opc = ARMISD::VCGT; break; 5255 case ISD::SETOLE: 5256 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH; 5257 case ISD::SETOGE: 5258 case ISD::SETGE: Opc = ARMISD::VCGE; break; 5259 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH; 5260 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 5261 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH; 5262 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 5263 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH; 5264 case ISD::SETONE: 5265 // Expand this to (OLT | OGT). 
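      // (SETONE is 'ordered and not equal'; SETUEQ reaches here via
      //  fallthrough with Invert set, since 'unordered or equal' is the
      //  complement of OLT | OGT.)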
5266 TmpOp0 = Op0; 5267 TmpOp1 = Op1; 5268 Opc = ISD::OR; 5269 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); 5270 Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1); 5271 break; 5272 case ISD::SETUO: 5273 Invert = true; 5274 LLVM_FALLTHROUGH; 5275 case ISD::SETO: 5276 // Expand this to (OLT | OGE). 5277 TmpOp0 = Op0; 5278 TmpOp1 = Op1; 5279 Opc = ISD::OR; 5280 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0); 5281 Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1); 5282 break; 5283 } 5284 } else { 5285 // Integer comparisons. 5286 switch (SetCCOpcode) { 5287 default: llvm_unreachable("Illegal integer comparison"); 5288 case ISD::SETNE: Invert = true; 5289 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 5290 case ISD::SETLT: Swap = true; 5291 case ISD::SETGT: Opc = ARMISD::VCGT; break; 5292 case ISD::SETLE: Swap = true; 5293 case ISD::SETGE: Opc = ARMISD::VCGE; break; 5294 case ISD::SETULT: Swap = true; 5295 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 5296 case ISD::SETULE: Swap = true; 5297 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 5298 } 5299 5300 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 5301 if (Opc == ARMISD::VCEQ) { 5302 5303 SDValue AndOp; 5304 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 5305 AndOp = Op0; 5306 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 5307 AndOp = Op1; 5308 5309 // Ignore bitconvert. 5310 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 5311 AndOp = AndOp.getOperand(0); 5312 5313 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 5314 Opc = ARMISD::VTST; 5315 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0)); 5316 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1)); 5317 Invert = !Invert; 5318 } 5319 } 5320 } 5321 5322 if (Swap) 5323 std::swap(Op0, Op1); 5324 5325 // If one of the operands is a constant vector zero, attempt to fold the 5326 // comparison to a specialized compare-against-zero form. 
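  // e.g. 'VCGE x, zerovec' becomes VCGEZ x, while 'VCGE zerovec, x' becomes
  // VCLEZ x.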
5327 SDValue SingleOp; 5328 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 5329 SingleOp = Op0; 5330 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 5331 if (Opc == ARMISD::VCGE) 5332 Opc = ARMISD::VCLEZ; 5333 else if (Opc == ARMISD::VCGT) 5334 Opc = ARMISD::VCLTZ; 5335 SingleOp = Op1; 5336 } 5337 5338 SDValue Result; 5339 if (SingleOp.getNode()) { 5340 switch (Opc) { 5341 case ARMISD::VCEQ: 5342 Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break; 5343 case ARMISD::VCGE: 5344 Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break; 5345 case ARMISD::VCLEZ: 5346 Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break; 5347 case ARMISD::VCGT: 5348 Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break; 5349 case ARMISD::VCLTZ: 5350 Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break; 5351 default: 5352 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); 5353 } 5354 } else { 5355 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1); 5356 } 5357 5358 Result = DAG.getSExtOrTrunc(Result, dl, VT); 5359 5360 if (Invert) 5361 Result = DAG.getNOT(dl, Result, VT); 5362 5363 return Result; 5364 } 5365 5366 static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) { 5367 SDValue LHS = Op.getOperand(0); 5368 SDValue RHS = Op.getOperand(1); 5369 SDValue Carry = Op.getOperand(2); 5370 SDValue Cond = Op.getOperand(3); 5371 SDLoc DL(Op); 5372 5373 assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only."); 5374 5375 assert(Carry.getOpcode() != ISD::CARRY_FALSE); 5376 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); 5377 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry); 5378 5379 SDValue FVal = DAG.getConstant(0, DL, MVT::i32); 5380 SDValue TVal = DAG.getConstant(1, DL, MVT::i32); 5381 SDValue ARMcc = DAG.getConstant( 5382 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32); 5383 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 5384 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR, 5385 Cmp.getValue(1), SDValue()); 5386 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc, 5387 CCR, Chain.getValue(1)); 5388 } 5389 5390 /// isNEONModifiedImm - Check if the specified splat value corresponds to a 5391 /// valid vector constant for a NEON instruction with a "modified immediate" 5392 /// operand (e.g., VMOV). If so, return the encoded value. 5393 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 5394 unsigned SplatBitSize, SelectionDAG &DAG, 5395 const SDLoc &dl, EVT &VT, bool is128Bits, 5396 NEONModImmType type) { 5397 unsigned OpCmode, Imm; 5398 5399 // SplatBitSize is set to the smallest size that splats the vector, so a 5400 // zero vector will always have SplatBitSize == 8. However, NEON modified 5401 // immediate instructions others than VMOV do not support the 8-bit encoding 5402 // of a zero vector, and the default encoding of zero is supposed to be the 5403 // 32-bit version. 5404 if (SplatBits == 0) 5405 SplatBitSize = 32; 5406 5407 switch (SplatBitSize) { 5408 case 8: 5409 if (type != VMOVModImm) 5410 return SDValue(); 5411 // Any 1-byte value is OK. Op=0, Cmode=1110. 5412 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 5413 OpCmode = 0xe; 5414 Imm = SplatBits; 5415 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 5416 break; 5417 5418 case 16: 5419 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 5420 VT = is128Bits ? 
MVT::v8i16 : MVT::v4i16; 5421 if ((SplatBits & ~0xff) == 0) { 5422 // Value = 0x00nn: Op=x, Cmode=100x. 5423 OpCmode = 0x8; 5424 Imm = SplatBits; 5425 break; 5426 } 5427 if ((SplatBits & ~0xff00) == 0) { 5428 // Value = 0xnn00: Op=x, Cmode=101x. 5429 OpCmode = 0xa; 5430 Imm = SplatBits >> 8; 5431 break; 5432 } 5433 return SDValue(); 5434 5435 case 32: 5436 // NEON's 32-bit VMOV supports splat values where: 5437 // * only one byte is nonzero, or 5438 // * the least significant byte is 0xff and the second byte is nonzero, or 5439 // * the least significant 2 bytes are 0xff and the third is nonzero. 5440 VT = is128Bits ? MVT::v4i32 : MVT::v2i32; 5441 if ((SplatBits & ~0xff) == 0) { 5442 // Value = 0x000000nn: Op=x, Cmode=000x. 5443 OpCmode = 0; 5444 Imm = SplatBits; 5445 break; 5446 } 5447 if ((SplatBits & ~0xff00) == 0) { 5448 // Value = 0x0000nn00: Op=x, Cmode=001x. 5449 OpCmode = 0x2; 5450 Imm = SplatBits >> 8; 5451 break; 5452 } 5453 if ((SplatBits & ~0xff0000) == 0) { 5454 // Value = 0x00nn0000: Op=x, Cmode=010x. 5455 OpCmode = 0x4; 5456 Imm = SplatBits >> 16; 5457 break; 5458 } 5459 if ((SplatBits & ~0xff000000) == 0) { 5460 // Value = 0xnn000000: Op=x, Cmode=011x. 5461 OpCmode = 0x6; 5462 Imm = SplatBits >> 24; 5463 break; 5464 } 5465 5466 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 5467 if (type == OtherModImm) return SDValue(); 5468 5469 if ((SplatBits & ~0xffff) == 0 && 5470 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 5471 // Value = 0x0000nnff: Op=x, Cmode=1100. 5472 OpCmode = 0xc; 5473 Imm = SplatBits >> 8; 5474 break; 5475 } 5476 5477 if ((SplatBits & ~0xffffff) == 0 && 5478 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 5479 // Value = 0x00nnffff: Op=x, Cmode=1101. 5480 OpCmode = 0xd; 5481 Imm = SplatBits >> 16; 5482 break; 5483 } 5484 5485 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 5486 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 5487 // VMOV.I32. A (very) minor optimization would be to replicate the value 5488 // and fall through here to test for a valid 64-bit splat. But, then the 5489 // caller would also need to check and handle the change in size. 5490 return SDValue(); 5491 5492 case 64: { 5493 if (type != VMOVModImm) 5494 return SDValue(); 5495 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 5496 uint64_t BitMask = 0xff; 5497 uint64_t Val = 0; 5498 unsigned ImmMask = 1; 5499 Imm = 0; 5500 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 5501 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 5502 Val |= BitMask; 5503 Imm |= ImmMask; 5504 } else if ((SplatBits & BitMask) != 0) { 5505 return SDValue(); 5506 } 5507 BitMask <<= 8; 5508 ImmMask <<= 1; 5509 } 5510 5511 if (DAG.getDataLayout().isBigEndian()) 5512 // swap higher and lower 32 bit word 5513 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4); 5514 5515 // Op=1, Cmode=1110. 5516 OpCmode = 0x1e; 5517 VT = is128Bits ? 
MVT::v2i64 : MVT::v1i64; 5518 break; 5519 } 5520 5521 default: 5522 llvm_unreachable("unexpected size for isNEONModifiedImm"); 5523 } 5524 5525 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 5526 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32); 5527 } 5528 5529 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 5530 const ARMSubtarget *ST) const { 5531 if (!ST->hasVFP3()) 5532 return SDValue(); 5533 5534 bool IsDouble = Op.getValueType() == MVT::f64; 5535 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 5536 5537 // Use the default (constant pool) lowering for double constants when we have 5538 // an SP-only FPU 5539 if (IsDouble && Subtarget->isFPOnlySP()) 5540 return SDValue(); 5541 5542 // Try splatting with a VMOV.f32... 5543 const APFloat &FPVal = CFP->getValueAPF(); 5544 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 5545 5546 if (ImmVal != -1) { 5547 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 5548 // We have code in place to select a valid ConstantFP already, no need to 5549 // do any mangling. 5550 return Op; 5551 } 5552 5553 // It's a float and we are trying to use NEON operations where 5554 // possible. Lower it to a splat followed by an extract. 5555 SDLoc DL(Op); 5556 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32); 5557 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 5558 NewVal); 5559 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 5560 DAG.getConstant(0, DL, MVT::i32)); 5561 } 5562 5563 // The rest of our options are NEON only, make sure that's allowed before 5564 // proceeding.. 5565 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 5566 return SDValue(); 5567 5568 EVT VMovVT; 5569 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 5570 5571 // It wouldn't really be worth bothering for doubles except for one very 5572 // important value, which does happen to match: 0.0. So make sure we don't do 5573 // anything stupid. 5574 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32)) 5575 return SDValue(); 5576 5577 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too). 5578 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), 5579 VMovVT, false, VMOVModImm); 5580 if (NewVal != SDValue()) { 5581 SDLoc DL(Op); 5582 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 5583 NewVal); 5584 if (IsDouble) 5585 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5586 5587 // It's a float: cast and extract a vector element. 5588 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 5589 VecConstant); 5590 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 5591 DAG.getConstant(0, DL, MVT::i32)); 5592 } 5593 5594 // Finally, try a VMVN.i32 5595 NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT, 5596 false, VMVNModImm); 5597 if (NewVal != SDValue()) { 5598 SDLoc DL(Op); 5599 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 5600 5601 if (IsDouble) 5602 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant); 5603 5604 // It's a float: cast and extract a vector element. 
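  // (Both lanes of the v2i32 splat hold the same bits, so lane 0 is used; the
  //  low lane of a D register aliases an S register, so the extract is
  //  normally free after register allocation.)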
5605 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 5606 VecConstant); 5607 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 5608 DAG.getConstant(0, DL, MVT::i32)); 5609 } 5610 5611 return SDValue(); 5612 } 5613 5614 // check if an VEXT instruction can handle the shuffle mask when the 5615 // vector sources of the shuffle are the same. 5616 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 5617 unsigned NumElts = VT.getVectorNumElements(); 5618 5619 // Assume that the first shuffle index is not UNDEF. Fail if it is. 5620 if (M[0] < 0) 5621 return false; 5622 5623 Imm = M[0]; 5624 5625 // If this is a VEXT shuffle, the immediate value is the index of the first 5626 // element. The other shuffle indices must be the successive elements after 5627 // the first one. 5628 unsigned ExpectedElt = Imm; 5629 for (unsigned i = 1; i < NumElts; ++i) { 5630 // Increment the expected index. If it wraps around, just follow it 5631 // back to index zero and keep going. 5632 ++ExpectedElt; 5633 if (ExpectedElt == NumElts) 5634 ExpectedElt = 0; 5635 5636 if (M[i] < 0) continue; // ignore UNDEF indices 5637 if (ExpectedElt != static_cast<unsigned>(M[i])) 5638 return false; 5639 } 5640 5641 return true; 5642 } 5643 5644 5645 static bool isVEXTMask(ArrayRef<int> M, EVT VT, 5646 bool &ReverseVEXT, unsigned &Imm) { 5647 unsigned NumElts = VT.getVectorNumElements(); 5648 ReverseVEXT = false; 5649 5650 // Assume that the first shuffle index is not UNDEF. Fail if it is. 5651 if (M[0] < 0) 5652 return false; 5653 5654 Imm = M[0]; 5655 5656 // If this is a VEXT shuffle, the immediate value is the index of the first 5657 // element. The other shuffle indices must be the successive elements after 5658 // the first one. 5659 unsigned ExpectedElt = Imm; 5660 for (unsigned i = 1; i < NumElts; ++i) { 5661 // Increment the expected index. If it wraps around, it may still be 5662 // a VEXT but the source vectors must be swapped. 5663 ExpectedElt += 1; 5664 if (ExpectedElt == NumElts * 2) { 5665 ExpectedElt = 0; 5666 ReverseVEXT = true; 5667 } 5668 5669 if (M[i] < 0) continue; // ignore UNDEF indices 5670 if (ExpectedElt != static_cast<unsigned>(M[i])) 5671 return false; 5672 } 5673 5674 // Adjust the index value if the source operands will be swapped. 5675 if (ReverseVEXT) 5676 Imm -= NumElts; 5677 5678 return true; 5679 } 5680 5681 /// isVREVMask - Check if a vector shuffle corresponds to a VREV 5682 /// instruction with the specified blocksize. (The order of the elements 5683 /// within each block of the vector is reversed.) 5684 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 5685 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 5686 "Only possible block sizes for VREV are: 16, 32, 64"); 5687 5688 unsigned EltSz = VT.getScalarSizeInBits(); 5689 if (EltSz == 64) 5690 return false; 5691 5692 unsigned NumElts = VT.getVectorNumElements(); 5693 unsigned BlockElts = M[0] + 1; 5694 // If the first shuffle index is UNDEF, be optimistic. 5695 if (M[0] < 0) 5696 BlockElts = BlockSize / EltSz; 5697 5698 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 5699 return false; 5700 5701 for (unsigned i = 0; i < NumElts; ++i) { 5702 if (M[i] < 0) continue; // ignore UNDEF indices 5703 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 5704 return false; 5705 } 5706 5707 return true; 5708 } 5709 5710 static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 5711 // We can handle <8 x i8> vector shuffles. 
If the index in the mask is out of 5712 // range, then 0 is placed into the resulting vector. So pretty much any mask 5713 // of 8 elements can work here. 5714 return VT == MVT::v8i8 && M.size() == 8; 5715 } 5716 5717 // Checks whether the shuffle mask represents a vector transpose (VTRN) by 5718 // checking that pairs of elements in the shuffle mask represent the same index 5719 // in each vector, incrementing the expected index by 2 at each step. 5720 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6] 5721 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g} 5722 // v2={e,f,g,h} 5723 // WhichResult gives the offset for each element in the mask based on which 5724 // of the two results it belongs to. 5725 // 5726 // The transpose can be represented either as: 5727 // result1 = shufflevector v1, v2, result1_shuffle_mask 5728 // result2 = shufflevector v1, v2, result2_shuffle_mask 5729 // where v1/v2 and the shuffle masks have the same number of elements 5730 // (here WhichResult (see below) indicates which result is being checked) 5731 // 5732 // or as: 5733 // results = shufflevector v1, v2, shuffle_mask 5734 // where both results are returned in one vector and the shuffle mask has twice 5735 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we 5736 // want to check the low half and high half of the shuffle mask as if it were 5737 // the other case 5738 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5739 unsigned EltSz = VT.getScalarSizeInBits(); 5740 if (EltSz == 64) 5741 return false; 5742 5743 unsigned NumElts = VT.getVectorNumElements(); 5744 if (M.size() != NumElts && M.size() != NumElts*2) 5745 return false; 5746 5747 // If the mask is twice as long as the input vector then we need to check the 5748 // upper and lower parts of the mask with a matching value for WhichResult 5749 // FIXME: A mask with only even values will be rejected in case the first 5750 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only 5751 // M[0] is used to determine WhichResult 5752 for (unsigned i = 0; i < M.size(); i += NumElts) { 5753 if (M.size() == NumElts * 2) 5754 WhichResult = i / NumElts; 5755 else 5756 WhichResult = M[i] == 0 ? 0 : 1; 5757 for (unsigned j = 0; j < NumElts; j += 2) { 5758 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 5759 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult)) 5760 return false; 5761 } 5762 } 5763 5764 if (M.size() == NumElts*2) 5765 WhichResult = 0; 5766 5767 return true; 5768 } 5769 5770 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 5771 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5772 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 5773 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5774 unsigned EltSz = VT.getScalarSizeInBits(); 5775 if (EltSz == 64) 5776 return false; 5777 5778 unsigned NumElts = VT.getVectorNumElements(); 5779 if (M.size() != NumElts && M.size() != NumElts*2) 5780 return false; 5781 5782 for (unsigned i = 0; i < M.size(); i += NumElts) { 5783 if (M.size() == NumElts * 2) 5784 WhichResult = i / NumElts; 5785 else 5786 WhichResult = M[i] == 0 ? 
0 : 1; 5787 for (unsigned j = 0; j < NumElts; j += 2) { 5788 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) || 5789 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult)) 5790 return false; 5791 } 5792 } 5793 5794 if (M.size() == NumElts*2) 5795 WhichResult = 0; 5796 5797 return true; 5798 } 5799 5800 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking 5801 // that the mask elements are either all even and in steps of size 2 or all odd 5802 // and in steps of size 2. 5803 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6] 5804 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g} 5805 // v2={e,f,g,h} 5806 // Requires similar checks to that of isVTRNMask with 5807 // respect the how results are returned. 5808 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5809 unsigned EltSz = VT.getScalarSizeInBits(); 5810 if (EltSz == 64) 5811 return false; 5812 5813 unsigned NumElts = VT.getVectorNumElements(); 5814 if (M.size() != NumElts && M.size() != NumElts*2) 5815 return false; 5816 5817 for (unsigned i = 0; i < M.size(); i += NumElts) { 5818 WhichResult = M[i] == 0 ? 0 : 1; 5819 for (unsigned j = 0; j < NumElts; ++j) { 5820 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult) 5821 return false; 5822 } 5823 } 5824 5825 if (M.size() == NumElts*2) 5826 WhichResult = 0; 5827 5828 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5829 if (VT.is64BitVector() && EltSz == 32) 5830 return false; 5831 5832 return true; 5833 } 5834 5835 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 5836 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5837 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 5838 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5839 unsigned EltSz = VT.getScalarSizeInBits(); 5840 if (EltSz == 64) 5841 return false; 5842 5843 unsigned NumElts = VT.getVectorNumElements(); 5844 if (M.size() != NumElts && M.size() != NumElts*2) 5845 return false; 5846 5847 unsigned Half = NumElts / 2; 5848 for (unsigned i = 0; i < M.size(); i += NumElts) { 5849 WhichResult = M[i] == 0 ? 0 : 1; 5850 for (unsigned j = 0; j < NumElts; j += Half) { 5851 unsigned Idx = WhichResult; 5852 for (unsigned k = 0; k < Half; ++k) { 5853 int MIdx = M[i + j + k]; 5854 if (MIdx >= 0 && (unsigned) MIdx != Idx) 5855 return false; 5856 Idx += 2; 5857 } 5858 } 5859 } 5860 5861 if (M.size() == NumElts*2) 5862 WhichResult = 0; 5863 5864 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5865 if (VT.is64BitVector() && EltSz == 32) 5866 return false; 5867 5868 return true; 5869 } 5870 5871 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking 5872 // that pairs of elements of the shufflemask represent the same index in each 5873 // vector incrementing sequentially through the vectors. 5874 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5] 5875 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f} 5876 // v2={e,f,g,h} 5877 // Requires similar checks to that of isVTRNMask with respect the how results 5878 // are returned. 
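// For illustration, the mask for the second VZIP result of the same v4i32
// inputs is [2, 6, 3, 7], i.e. x={c,g,d,h}; the check below accepts it with
// WhichResult == 1.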
5879 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 5880 unsigned EltSz = VT.getScalarSizeInBits(); 5881 if (EltSz == 64) 5882 return false; 5883 5884 unsigned NumElts = VT.getVectorNumElements(); 5885 if (M.size() != NumElts && M.size() != NumElts*2) 5886 return false; 5887 5888 for (unsigned i = 0; i < M.size(); i += NumElts) { 5889 WhichResult = M[i] == 0 ? 0 : 1; 5890 unsigned Idx = WhichResult * NumElts / 2; 5891 for (unsigned j = 0; j < NumElts; j += 2) { 5892 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 5893 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts)) 5894 return false; 5895 Idx += 1; 5896 } 5897 } 5898 5899 if (M.size() == NumElts*2) 5900 WhichResult = 0; 5901 5902 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5903 if (VT.is64BitVector() && EltSz == 32) 5904 return false; 5905 5906 return true; 5907 } 5908 5909 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 5910 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 5911 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 5912 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 5913 unsigned EltSz = VT.getScalarSizeInBits(); 5914 if (EltSz == 64) 5915 return false; 5916 5917 unsigned NumElts = VT.getVectorNumElements(); 5918 if (M.size() != NumElts && M.size() != NumElts*2) 5919 return false; 5920 5921 for (unsigned i = 0; i < M.size(); i += NumElts) { 5922 WhichResult = M[i] == 0 ? 0 : 1; 5923 unsigned Idx = WhichResult * NumElts / 2; 5924 for (unsigned j = 0; j < NumElts; j += 2) { 5925 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) || 5926 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx)) 5927 return false; 5928 Idx += 1; 5929 } 5930 } 5931 5932 if (M.size() == NumElts*2) 5933 WhichResult = 0; 5934 5935 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 5936 if (VT.is64BitVector() && EltSz == 32) 5937 return false; 5938 5939 return true; 5940 } 5941 5942 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), 5943 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't. 5944 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT, 5945 unsigned &WhichResult, 5946 bool &isV_UNDEF) { 5947 isV_UNDEF = false; 5948 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 5949 return ARMISD::VTRN; 5950 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 5951 return ARMISD::VUZP; 5952 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 5953 return ARMISD::VZIP; 5954 5955 isV_UNDEF = true; 5956 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5957 return ARMISD::VTRN; 5958 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5959 return ARMISD::VUZP; 5960 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5961 return ARMISD::VZIP; 5962 5963 return 0; 5964 } 5965 5966 /// \return true if this is a reverse operation on an vector. 5967 static bool isReverseMask(ArrayRef<int> M, EVT VT) { 5968 unsigned NumElts = VT.getVectorNumElements(); 5969 // Make sure the mask has the right size. 5970 if (NumElts != M.size()) 5971 return false; 5972 5973 // Look for <15, ..., 3, -1, 1, 0>. 5974 for (unsigned i = 0; i != NumElts; ++i) 5975 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) 5976 return false; 5977 5978 return true; 5979 } 5980 5981 // If N is an integer constant that can be moved into a register in one 5982 // instruction, return an SDValue of such a constant (will become a MOV 5983 // instruction). Otherwise return null. 
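// (Illustrative values: with isThumb1Only(), anything in [0, 255] such as 200
//  qualifies; otherwise a value is accepted when it or its bitwise NOT is an
//  ARM so_imm, e.g. 0x0000FF00 (0xFF rotated), while 0x00FF00FF fails both
//  tests and is left for other lowering.)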
5984 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 5985 const ARMSubtarget *ST, const SDLoc &dl) { 5986 uint64_t Val; 5987 if (!isa<ConstantSDNode>(N)) 5988 return SDValue(); 5989 Val = cast<ConstantSDNode>(N)->getZExtValue(); 5990 5991 if (ST->isThumb1Only()) { 5992 if (Val <= 255 || ~Val <= 255) 5993 return DAG.getConstant(Val, dl, MVT::i32); 5994 } else { 5995 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 5996 return DAG.getConstant(Val, dl, MVT::i32); 5997 } 5998 return SDValue(); 5999 } 6000 6001 // If this is a case we can't handle, return null and let the default 6002 // expansion code take care of it. 6003 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 6004 const ARMSubtarget *ST) const { 6005 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 6006 SDLoc dl(Op); 6007 EVT VT = Op.getValueType(); 6008 6009 APInt SplatBits, SplatUndef; 6010 unsigned SplatBitSize; 6011 bool HasAnyUndefs; 6012 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6013 if (SplatBitSize <= 64) { 6014 // Check if an immediate VMOV works. 6015 EVT VmovVT; 6016 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 6017 SplatUndef.getZExtValue(), SplatBitSize, 6018 DAG, dl, VmovVT, VT.is128BitVector(), 6019 VMOVModImm); 6020 if (Val.getNode()) { 6021 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 6022 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 6023 } 6024 6025 // Try an immediate VMVN. 6026 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 6027 Val = isNEONModifiedImm(NegatedImm, 6028 SplatUndef.getZExtValue(), SplatBitSize, 6029 DAG, dl, VmovVT, VT.is128BitVector(), 6030 VMVNModImm); 6031 if (Val.getNode()) { 6032 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 6033 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 6034 } 6035 6036 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 6037 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 6038 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 6039 if (ImmVal != -1) { 6040 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32); 6041 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 6042 } 6043 } 6044 } 6045 } 6046 6047 // Scan through the operands to see if only one value is used. 6048 // 6049 // As an optimisation, even if more than one value is used it may be more 6050 // profitable to splat with one value then change some lanes. 6051 // 6052 // Heuristically we decide to do this if the vector has a "dominant" value, 6053 // defined as splatted to more than half of the lanes. 6054 unsigned NumElts = VT.getVectorNumElements(); 6055 bool isOnlyLowElement = true; 6056 bool usesOnlyOneValue = true; 6057 bool hasDominantValue = false; 6058 bool isConstant = true; 6059 6060 // Map of the number of times a particular SDValue appears in the 6061 // element list. 6062 DenseMap<SDValue, unsigned> ValueCounts; 6063 SDValue Value; 6064 for (unsigned i = 0; i < NumElts; ++i) { 6065 SDValue V = Op.getOperand(i); 6066 if (V.isUndef()) 6067 continue; 6068 if (i > 0) 6069 isOnlyLowElement = false; 6070 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 6071 isConstant = false; 6072 6073 ValueCounts.insert(std::make_pair(V, 0)); 6074 unsigned &Count = ValueCounts[V]; 6075 6076 // Is this value dominant? 
(takes up more than half of the lanes) 6077 if (++Count > (NumElts / 2)) { 6078 hasDominantValue = true; 6079 Value = V; 6080 } 6081 } 6082 if (ValueCounts.size() != 1) 6083 usesOnlyOneValue = false; 6084 if (!Value.getNode() && ValueCounts.size() > 0) 6085 Value = ValueCounts.begin()->first; 6086 6087 if (ValueCounts.size() == 0) 6088 return DAG.getUNDEF(VT); 6089 6090 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. 6091 // Keep going if we are hitting this case. 6092 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 6093 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 6094 6095 unsigned EltSize = VT.getScalarSizeInBits(); 6096 6097 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 6098 // i32 and try again. 6099 if (hasDominantValue && EltSize <= 32) { 6100 if (!isConstant) { 6101 SDValue N; 6102 6103 // If we are VDUPing a value that comes directly from a vector, that will 6104 // cause an unnecessary move to and from a GPR, where instead we could 6105 // just use VDUPLANE. We can only do this if the lane being extracted 6106 // is at a constant index, as the VDUP from lane instructions only have 6107 // constant-index forms. 6108 ConstantSDNode *constIndex; 6109 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 6110 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) { 6111 // We need to create a new undef vector to use for the VDUPLANE if the 6112 // size of the vector from which we get the value is different than the 6113 // size of the vector that we need to create. We will insert the element 6114 // such that the register coalescer will remove unnecessary copies. 6115 if (VT != Value->getOperand(0).getValueType()) { 6116 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 6117 VT.getVectorNumElements(); 6118 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6119 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 6120 Value, DAG.getConstant(index, dl, MVT::i32)), 6121 DAG.getConstant(index, dl, MVT::i32)); 6122 } else 6123 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6124 Value->getOperand(0), Value->getOperand(1)); 6125 } else 6126 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 6127 6128 if (!usesOnlyOneValue) { 6129 // The dominant value was splatted as 'N', but we now have to insert 6130 // all differing elements. 6131 for (unsigned I = 0; I < NumElts; ++I) { 6132 if (Op.getOperand(I) == Value) 6133 continue; 6134 SmallVector<SDValue, 3> Ops; 6135 Ops.push_back(N); 6136 Ops.push_back(Op.getOperand(I)); 6137 Ops.push_back(DAG.getConstant(I, dl, MVT::i32)); 6138 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops); 6139 } 6140 } 6141 return N; 6142 } 6143 if (VT.getVectorElementType().isFloatingPoint()) { 6144 SmallVector<SDValue, 8> Ops; 6145 for (unsigned i = 0; i < NumElts; ++i) 6146 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 6147 Op.getOperand(i))); 6148 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 6149 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); 6150 Val = LowerBUILD_VECTOR(Val, DAG, ST); 6151 if (Val.getNode()) 6152 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6153 } 6154 if (usesOnlyOneValue) { 6155 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 6156 if (isConstant && Val.getNode()) 6157 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 6158 } 6159 } 6160 6161 // If all elements are constants and the case above didn't get hit, fall back 6162 // to the default expansion, which will generate a load from the constant 6163 // pool. 
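// (e.g. a v4i32 built out of four unrelated constants is normally better off
//  as a single constant-pool load, typically a vldr/vld1, than as a chain of
//  moves and lane inserts.)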
6164 if (isConstant) 6165 return SDValue(); 6166 6167 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 6168 if (NumElts >= 4) { 6169 SDValue shuffle = ReconstructShuffle(Op, DAG); 6170 if (shuffle != SDValue()) 6171 return shuffle; 6172 } 6173 6174 // Vectors with 32- or 64-bit elements can be built by directly assigning 6175 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 6176 // will be legalized. 6177 if (EltSize >= 32) { 6178 // Do the expansion with floating-point types, since that is what the VFP 6179 // registers are defined to use, and since i64 is not legal. 6180 EVT EltVT = EVT::getFloatingPointVT(EltSize); 6181 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 6182 SmallVector<SDValue, 8> Ops; 6183 for (unsigned i = 0; i < NumElts; ++i) 6184 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 6185 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 6186 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6187 } 6188 6189 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 6190 // know the default expansion would otherwise fall back on something even 6191 // worse. For a vector with one or two non-undef values, that's 6192 // scalar_to_vector for the elements followed by a shuffle (provided the 6193 // shuffle is valid for the target) and materialization element by element 6194 // on the stack followed by a load for everything else. 6195 if (!isConstant && !usesOnlyOneValue) { 6196 SDValue Vec = DAG.getUNDEF(VT); 6197 for (unsigned i = 0 ; i < NumElts; ++i) { 6198 SDValue V = Op.getOperand(i); 6199 if (V.isUndef()) 6200 continue; 6201 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32); 6202 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); 6203 } 6204 return Vec; 6205 } 6206 6207 return SDValue(); 6208 } 6209 6210 // Gather data to see if the operation can be modelled as a 6211 // shuffle in combination with VEXTs. 6212 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 6213 SelectionDAG &DAG) const { 6214 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 6215 SDLoc dl(Op); 6216 EVT VT = Op.getValueType(); 6217 unsigned NumElts = VT.getVectorNumElements(); 6218 6219 struct ShuffleSourceInfo { 6220 SDValue Vec; 6221 unsigned MinElt; 6222 unsigned MaxElt; 6223 6224 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to 6225 // be compatible with the shuffle we intend to construct. As a result 6226 // ShuffleVec will be some sliding window into the original Vec. 6227 SDValue ShuffleVec; 6228 6229 // Code should guarantee that element i in Vec starts at element "WindowBase 6230 // + i * WindowScale in ShuffleVec". 6231 int WindowBase; 6232 int WindowScale; 6233 6234 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; } 6235 ShuffleSourceInfo(SDValue Vec) 6236 : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0), 6237 WindowScale(1) {} 6238 }; 6239 6240 // First gather all vectors used as an immediate source for this BUILD_VECTOR 6241 // node. 6242 SmallVector<ShuffleSourceInfo, 2> Sources; 6243 for (unsigned i = 0; i < NumElts; ++i) { 6244 SDValue V = Op.getOperand(i); 6245 if (V.isUndef()) 6246 continue; 6247 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 6248 // A shuffle can only come from building a vector from various 6249 // elements of other vectors. 
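// (The shape this loop accepts is, illustratively,
//    (build_vector (extractelt %a, 0), (extractelt %b, 1), undef, ...)
//  with constant extract indices; any other kind of element, e.g. the result
//  of an add, defeats the reconstruction and we give up here.)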
6250 return SDValue(); 6251 } else if (!isa<ConstantSDNode>(V.getOperand(1))) { 6252 // Furthermore, shuffles require a constant mask, whereas extractelts 6253 // accept variable indices. 6254 return SDValue(); 6255 } 6256 6257 // Add this element source to the list if it's not already there. 6258 SDValue SourceVec = V.getOperand(0); 6259 auto Source = find(Sources, SourceVec); 6260 if (Source == Sources.end()) 6261 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec)); 6262 6263 // Update the minimum and maximum lane number seen. 6264 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 6265 Source->MinElt = std::min(Source->MinElt, EltNo); 6266 Source->MaxElt = std::max(Source->MaxElt, EltNo); 6267 } 6268 6269 // Currently only do something sane when at most two source vectors 6270 // are involved. 6271 if (Sources.size() > 2) 6272 return SDValue(); 6273 6274 // Find out the smallest element size among result and two sources, and use 6275 // it as element size to build the shuffle_vector. 6276 EVT SmallestEltTy = VT.getVectorElementType(); 6277 for (auto &Source : Sources) { 6278 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType(); 6279 if (SrcEltTy.bitsLT(SmallestEltTy)) 6280 SmallestEltTy = SrcEltTy; 6281 } 6282 unsigned ResMultiplier = 6283 VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits(); 6284 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits(); 6285 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts); 6286 6287 // If the source vector is too wide or too narrow, we may nevertheless be able 6288 // to construct a compatible shuffle either by concatenating it with UNDEF or 6289 // extracting a suitable range of elements. 6290 for (auto &Src : Sources) { 6291 EVT SrcVT = Src.ShuffleVec.getValueType(); 6292 6293 if (SrcVT.getSizeInBits() == VT.getSizeInBits()) 6294 continue; 6295 6296 // This stage of the search produces a source with the same element type as 6297 // the original, but with a total width matching the BUILD_VECTOR output. 6298 EVT EltVT = SrcVT.getVectorElementType(); 6299 unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits(); 6300 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts); 6301 6302 if (SrcVT.getSizeInBits() < VT.getSizeInBits()) { 6303 if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits()) 6304 return SDValue(); 6305 // We can pad out the smaller vector for free, so if it's part of a 6306 // shuffle... 
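// (e.g. a v4i16 source feeding a shuffle that needs v8i16-sized operands is
//  widened by concatenating an undef v4i16 onto it; the padding lanes are
//  never referenced by the mask, so this costs nothing.)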
6307 Src.ShuffleVec = 6308 DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec, 6309 DAG.getUNDEF(Src.ShuffleVec.getValueType())); 6310 continue; 6311 } 6312 6313 if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits()) 6314 return SDValue(); 6315 6316 if (Src.MaxElt - Src.MinElt >= NumSrcElts) { 6317 // Span too large for a VEXT to cope 6318 return SDValue(); 6319 } 6320 6321 if (Src.MinElt >= NumSrcElts) { 6322 // The extraction can just take the second half 6323 Src.ShuffleVec = 6324 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 6325 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 6326 Src.WindowBase = -NumSrcElts; 6327 } else if (Src.MaxElt < NumSrcElts) { 6328 // The extraction can just take the first half 6329 Src.ShuffleVec = 6330 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 6331 DAG.getConstant(0, dl, MVT::i32)); 6332 } else { 6333 // An actual VEXT is needed 6334 SDValue VEXTSrc1 = 6335 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 6336 DAG.getConstant(0, dl, MVT::i32)); 6337 SDValue VEXTSrc2 = 6338 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec, 6339 DAG.getConstant(NumSrcElts, dl, MVT::i32)); 6340 6341 Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1, 6342 VEXTSrc2, 6343 DAG.getConstant(Src.MinElt, dl, MVT::i32)); 6344 Src.WindowBase = -Src.MinElt; 6345 } 6346 } 6347 6348 // Another possible incompatibility occurs from the vector element types. We 6349 // can fix this by bitcasting the source vectors to the same type we intend 6350 // for the shuffle. 6351 for (auto &Src : Sources) { 6352 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType(); 6353 if (SrcEltTy == SmallestEltTy) 6354 continue; 6355 assert(ShuffleVT.getVectorElementType() == SmallestEltTy); 6356 Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec); 6357 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits(); 6358 Src.WindowBase *= Src.WindowScale; 6359 } 6360 6361 // Final sanity check before we try to actually produce a shuffle. 6362 DEBUG( 6363 for (auto Src : Sources) 6364 assert(Src.ShuffleVec.getValueType() == ShuffleVT); 6365 ); 6366 6367 // The stars all align, our next step is to produce the mask for the shuffle. 6368 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1); 6369 int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits(); 6370 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { 6371 SDValue Entry = Op.getOperand(i); 6372 if (Entry.isUndef()) 6373 continue; 6374 6375 auto Src = find(Sources, Entry.getOperand(0)); 6376 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue(); 6377 6378 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit 6379 // trunc. So only std::min(SrcBits, DestBits) actually get defined in this 6380 // segment. 6381 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); 6382 int BitsDefined = std::min(OrigEltTy.getSizeInBits(), 6383 VT.getScalarSizeInBits()); 6384 int LanesDefined = BitsDefined / BitsPerShuffleLane; 6385 6386 // This source is expected to fill ResMultiplier lanes of the final shuffle, 6387 // starting at the appropriate offset. 6388 int *LaneMask = &Mask[i * ResMultiplier]; 6389 6390 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase; 6391 ExtractBase += NumElts * (Src - Sources.begin()); 6392 for (int j = 0; j < LanesDefined; ++j) 6393 LaneMask[j] = ExtractBase + j; 6394 } 6395 6396 // Final check before we try to produce nonsense... 
6397 if (!isShuffleMaskLegal(Mask, ShuffleVT)) 6398 return SDValue(); 6399 6400 // We can't handle more than two sources. This should have already 6401 // been checked before this point. 6402 assert(Sources.size() <= 2 && "Too many sources!"); 6403 6404 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) }; 6405 for (unsigned i = 0; i < Sources.size(); ++i) 6406 ShuffleOps[i] = Sources[i].ShuffleVec; 6407 6408 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], 6409 ShuffleOps[1], Mask); 6410 return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle); 6411 } 6412 6413 /// isShuffleMaskLegal - Targets can use this to indicate that they only 6414 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 6415 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 6416 /// are assumed to be legal. 6417 bool 6418 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 6419 EVT VT) const { 6420 if (VT.getVectorNumElements() == 4 && 6421 (VT.is128BitVector() || VT.is64BitVector())) { 6422 unsigned PFIndexes[4]; 6423 for (unsigned i = 0; i != 4; ++i) { 6424 if (M[i] < 0) 6425 PFIndexes[i] = 8; 6426 else 6427 PFIndexes[i] = M[i]; 6428 } 6429 6430 // Compute the index in the perfect shuffle table. 6431 unsigned PFTableIndex = 6432 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 6433 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 6434 unsigned Cost = (PFEntry >> 30); 6435 6436 if (Cost <= 4) 6437 return true; 6438 } 6439 6440 bool ReverseVEXT, isV_UNDEF; 6441 unsigned Imm, WhichResult; 6442 6443 unsigned EltSize = VT.getScalarSizeInBits(); 6444 return (EltSize >= 32 || 6445 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 6446 isVREVMask(M, VT, 64) || 6447 isVREVMask(M, VT, 32) || 6448 isVREVMask(M, VT, 16) || 6449 isVEXTMask(M, VT, ReverseVEXT, Imm) || 6450 isVTBLMask(M, VT) || 6451 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) || 6452 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); 6453 } 6454 6455 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 6456 /// the specified operations to build the shuffle. 6457 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 6458 SDValue RHS, SelectionDAG &DAG, 6459 const SDLoc &dl) { 6460 unsigned OpNum = (PFEntry >> 26) & 0x0F; 6461 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 6462 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 6463 6464 enum { 6465 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 6466 OP_VREV, 6467 OP_VDUP0, 6468 OP_VDUP1, 6469 OP_VDUP2, 6470 OP_VDUP3, 6471 OP_VEXT1, 6472 OP_VEXT2, 6473 OP_VEXT3, 6474 OP_VUZPL, // VUZP, left result 6475 OP_VUZPR, // VUZP, right result 6476 OP_VZIPL, // VZIP, left result 6477 OP_VZIPR, // VZIP, right result 6478 OP_VTRNL, // VTRN, left result 6479 OP_VTRNR // VTRN, right result 6480 }; 6481 6482 if (OpNum == OP_COPY) { 6483 if (LHSID == (1*9+2)*9+3) return LHS; 6484 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 6485 return RHS; 6486 } 6487 6488 SDValue OpLHS, OpRHS; 6489 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 6490 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 6491 EVT VT = OpLHS.getValueType(); 6492 6493 switch (OpNum) { 6494 default: llvm_unreachable("Unknown shuffle opcode!"); 6495 case OP_VREV: 6496 // VREV divides the vector in half and swaps within the half. 
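// On the 4-element vectors the perfect-shuffle table is built for, this is the
// pairwise swap <a,b,c,d> -> <b,a,d,c>, whichever VREV width gets picked below.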
6497 if (VT.getVectorElementType() == MVT::i32 || 6498 VT.getVectorElementType() == MVT::f32) 6499 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 6500 // vrev <4 x i16> -> VREV32 6501 if (VT.getVectorElementType() == MVT::i16) 6502 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 6503 // vrev <4 x i8> -> VREV16 6504 assert(VT.getVectorElementType() == MVT::i8); 6505 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 6506 case OP_VDUP0: 6507 case OP_VDUP1: 6508 case OP_VDUP2: 6509 case OP_VDUP3: 6510 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 6511 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32)); 6512 case OP_VEXT1: 6513 case OP_VEXT2: 6514 case OP_VEXT3: 6515 return DAG.getNode(ARMISD::VEXT, dl, VT, 6516 OpLHS, OpRHS, 6517 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32)); 6518 case OP_VUZPL: 6519 case OP_VUZPR: 6520 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 6521 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 6522 case OP_VZIPL: 6523 case OP_VZIPR: 6524 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 6525 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 6526 case OP_VTRNL: 6527 case OP_VTRNR: 6528 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 6529 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 6530 } 6531 } 6532 6533 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 6534 ArrayRef<int> ShuffleMask, 6535 SelectionDAG &DAG) { 6536 // Check to see if we can use the VTBL instruction. 6537 SDValue V1 = Op.getOperand(0); 6538 SDValue V2 = Op.getOperand(1); 6539 SDLoc DL(Op); 6540 6541 SmallVector<SDValue, 8> VTBLMask; 6542 for (ArrayRef<int>::iterator 6543 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 6544 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32)); 6545 6546 if (V2.getNode()->isUndef()) 6547 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 6548 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); 6549 6550 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 6551 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask)); 6552 } 6553 6554 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, 6555 SelectionDAG &DAG) { 6556 SDLoc DL(Op); 6557 SDValue OpLHS = Op.getOperand(0); 6558 EVT VT = OpLHS.getValueType(); 6559 6560 assert((VT == MVT::v8i16 || VT == MVT::v16i8) && 6561 "Expect an v8i16/v16i8 type"); 6562 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS); 6563 // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now, 6564 // extract the first 8 bytes into the top double word and the last 8 bytes 6565 // into the bottom double word. The v8i16 case is similar. 6566 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4; 6567 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, 6568 DAG.getConstant(ExtractNum, DL, MVT::i32)); 6569 } 6570 6571 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 6572 SDValue V1 = Op.getOperand(0); 6573 SDValue V2 = Op.getOperand(1); 6574 SDLoc dl(Op); 6575 EVT VT = Op.getValueType(); 6576 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 6577 6578 // Convert shuffles that are directly supported on NEON to target-specific 6579 // DAG nodes, instead of keeping them as shuffles and matching them again 6580 // during code selection. This is more efficient and avoids the possibility 6581 // of inconsistencies between legalization and selection. 6582 // FIXME: floating-point vectors should be canonicalized to integer vectors 6583 // of the same time so that they get CSEd properly. 
6584 ArrayRef<int> ShuffleMask = SVN->getMask(); 6585 6586 unsigned EltSize = VT.getScalarSizeInBits(); 6587 if (EltSize <= 32) { 6588 if (SVN->isSplat()) { 6589 int Lane = SVN->getSplatIndex(); 6590 // If this is undef splat, generate it via "just" vdup, if possible. 6591 if (Lane == -1) Lane = 0; 6592 6593 // Test if V1 is a SCALAR_TO_VECTOR. 6594 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 6595 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 6596 } 6597 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 6598 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 6599 // reaches it). 6600 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 6601 !isa<ConstantSDNode>(V1.getOperand(0))) { 6602 bool IsScalarToVector = true; 6603 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 6604 if (!V1.getOperand(i).isUndef()) { 6605 IsScalarToVector = false; 6606 break; 6607 } 6608 if (IsScalarToVector) 6609 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 6610 } 6611 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 6612 DAG.getConstant(Lane, dl, MVT::i32)); 6613 } 6614 6615 bool ReverseVEXT; 6616 unsigned Imm; 6617 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 6618 if (ReverseVEXT) 6619 std::swap(V1, V2); 6620 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 6621 DAG.getConstant(Imm, dl, MVT::i32)); 6622 } 6623 6624 if (isVREVMask(ShuffleMask, VT, 64)) 6625 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 6626 if (isVREVMask(ShuffleMask, VT, 32)) 6627 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 6628 if (isVREVMask(ShuffleMask, VT, 16)) 6629 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 6630 6631 if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 6632 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 6633 DAG.getConstant(Imm, dl, MVT::i32)); 6634 } 6635 6636 // Check for Neon shuffles that modify both input vectors in place. 6637 // If both results are used, i.e., if there are two shuffles with the same 6638 // source operands and with masks corresponding to both results of one of 6639 // these operations, DAG memoization will ensure that a single node is 6640 // used for both shuffles. 6641 unsigned WhichResult; 6642 bool isV_UNDEF; 6643 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 6644 ShuffleMask, VT, WhichResult, isV_UNDEF)) { 6645 if (isV_UNDEF) 6646 V2 = V1; 6647 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2) 6648 .getValue(WhichResult); 6649 } 6650 6651 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize 6652 // shuffles that produce a result larger than their operands with: 6653 // shuffle(concat(v1, undef), concat(v2, undef)) 6654 // -> 6655 // shuffle(concat(v1, v2), undef) 6656 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine). 6657 // 6658 // This is useful in the general case, but there are special cases where 6659 // native shuffles produce larger results: the two-result ops. 6660 // 6661 // Look through the concat when lowering them: 6662 // shuffle(concat(v1, v2), undef) 6663 // -> 6664 // concat(VZIP(v1, v2):0, :1) 6665 // 6666 if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) { 6667 SDValue SubV1 = V1->getOperand(0); 6668 SDValue SubV2 = V1->getOperand(1); 6669 EVT SubVT = SubV1.getValueType(); 6670 6671 // We expect these to have been canonicalized to -1. 
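// (Concrete example: for v1,v2 of type v4i16 concatenated into a v8i16, the
//  mask <0,4,1,5,2,6,3,7> zips the two halves and is lowered below to
//  concat(VZIP(v1,v2):0, VZIP(v1,v2):1).)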
6672 assert(all_of(ShuffleMask, [&](int i) { 6673 return i < (int)VT.getVectorNumElements(); 6674 }) && "Unexpected shuffle index into UNDEF operand!"); 6675 6676 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask( 6677 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) { 6678 if (isV_UNDEF) 6679 SubV2 = SubV1; 6680 assert((WhichResult == 0) && 6681 "In-place shuffle of concat can only have one result!"); 6682 SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT), 6683 SubV1, SubV2); 6684 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0), 6685 Res.getValue(1)); 6686 } 6687 } 6688 } 6689 6690 // If the shuffle is not directly supported and it has 4 elements, use 6691 // the PerfectShuffle-generated table to synthesize it from other shuffles. 6692 unsigned NumElts = VT.getVectorNumElements(); 6693 if (NumElts == 4) { 6694 unsigned PFIndexes[4]; 6695 for (unsigned i = 0; i != 4; ++i) { 6696 if (ShuffleMask[i] < 0) 6697 PFIndexes[i] = 8; 6698 else 6699 PFIndexes[i] = ShuffleMask[i]; 6700 } 6701 6702 // Compute the index in the perfect shuffle table. 6703 unsigned PFTableIndex = 6704 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 6705 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 6706 unsigned Cost = (PFEntry >> 30); 6707 6708 if (Cost <= 4) 6709 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 6710 } 6711 6712 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 6713 if (EltSize >= 32) { 6714 // Do the expansion with floating-point types, since that is what the VFP 6715 // registers are defined to use, and since i64 is not legal. 6716 EVT EltVT = EVT::getFloatingPointVT(EltSize); 6717 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 6718 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 6719 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 6720 SmallVector<SDValue, 8> Ops; 6721 for (unsigned i = 0; i < NumElts; ++i) { 6722 if (ShuffleMask[i] < 0) 6723 Ops.push_back(DAG.getUNDEF(EltVT)); 6724 else 6725 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 6726 ShuffleMask[i] < (int)NumElts ? V1 : V2, 6727 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 6728 dl, MVT::i32))); 6729 } 6730 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops); 6731 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 6732 } 6733 6734 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) 6735 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); 6736 6737 if (VT == MVT::v8i8) 6738 if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG)) 6739 return NewOp; 6740 6741 return SDValue(); 6742 } 6743 6744 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 6745 // INSERT_VECTOR_ELT is legal only for immediate indexes. 6746 SDValue Lane = Op.getOperand(2); 6747 if (!isa<ConstantSDNode>(Lane)) 6748 return SDValue(); 6749 6750 return Op; 6751 } 6752 6753 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 6754 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
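// For sub-32-bit elements read into an i32, the unsigned get-lane form is used
// below (VGETLANEu, i.e. vmov.u8/vmov.u16): EXTRACT_VECTOR_ELT leaves the high
// bits of a widened result undefined, so the zero-extending variant is as good
// a choice as any.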
6755 SDValue Lane = Op.getOperand(1); 6756 if (!isa<ConstantSDNode>(Lane)) 6757 return SDValue(); 6758 6759 SDValue Vec = Op.getOperand(0); 6760 if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) { 6761 SDLoc dl(Op); 6762 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 6763 } 6764 6765 return Op; 6766 } 6767 6768 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 6769 // The only time a CONCAT_VECTORS operation can have legal types is when 6770 // two 64-bit vectors are concatenated to a 128-bit vector. 6771 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 6772 "unexpected CONCAT_VECTORS"); 6773 SDLoc dl(Op); 6774 SDValue Val = DAG.getUNDEF(MVT::v2f64); 6775 SDValue Op0 = Op.getOperand(0); 6776 SDValue Op1 = Op.getOperand(1); 6777 if (!Op0.isUndef()) 6778 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 6779 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 6780 DAG.getIntPtrConstant(0, dl)); 6781 if (!Op1.isUndef()) 6782 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 6783 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 6784 DAG.getIntPtrConstant(1, dl)); 6785 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 6786 } 6787 6788 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 6789 /// element has been zero/sign-extended, depending on the isSigned parameter, 6790 /// from an integer type half its size. 6791 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 6792 bool isSigned) { 6793 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 6794 EVT VT = N->getValueType(0); 6795 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 6796 SDNode *BVN = N->getOperand(0).getNode(); 6797 if (BVN->getValueType(0) != MVT::v4i32 || 6798 BVN->getOpcode() != ISD::BUILD_VECTOR) 6799 return false; 6800 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; 6801 unsigned HiElt = 1 - LoElt; 6802 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 6803 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 6804 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 6805 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 6806 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 6807 return false; 6808 if (isSigned) { 6809 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 6810 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 6811 return true; 6812 } else { 6813 if (Hi0->isNullValue() && Hi1->isNullValue()) 6814 return true; 6815 } 6816 return false; 6817 } 6818 6819 if (N->getOpcode() != ISD::BUILD_VECTOR) 6820 return false; 6821 6822 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 6823 SDNode *Elt = N->getOperand(i).getNode(); 6824 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 6825 unsigned EltSize = VT.getScalarSizeInBits(); 6826 unsigned HalfSize = EltSize / 2; 6827 if (isSigned) { 6828 if (!isIntN(HalfSize, C->getSExtValue())) 6829 return false; 6830 } else { 6831 if (!isUIntN(HalfSize, C->getZExtValue())) 6832 return false; 6833 } 6834 continue; 6835 } 6836 return false; 6837 } 6838 6839 return true; 6840 } 6841 6842 /// isSignExtended - Check if a node is a vector value that is sign-extended 6843 /// or a constant BUILD_VECTOR with sign-extended elements. 
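/// For example, (sext v4i16 -> v4i32) and a constant v4i32 BUILD_VECTOR whose
/// elements all fit in a signed 16-bit range both count as sign-extended here.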
6844 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 6845 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 6846 return true; 6847 if (isExtendedBUILD_VECTOR(N, DAG, true)) 6848 return true; 6849 return false; 6850 } 6851 6852 /// isZeroExtended - Check if a node is a vector value that is zero-extended 6853 /// or a constant BUILD_VECTOR with zero-extended elements. 6854 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 6855 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 6856 return true; 6857 if (isExtendedBUILD_VECTOR(N, DAG, false)) 6858 return true; 6859 return false; 6860 } 6861 6862 static EVT getExtensionTo64Bits(const EVT &OrigVT) { 6863 if (OrigVT.getSizeInBits() >= 64) 6864 return OrigVT; 6865 6866 assert(OrigVT.isSimple() && "Expecting a simple value type"); 6867 6868 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; 6869 switch (OrigSimpleTy) { 6870 default: llvm_unreachable("Unexpected Vector Type"); 6871 case MVT::v2i8: 6872 case MVT::v2i16: 6873 return MVT::v2i32; 6874 case MVT::v4i8: 6875 return MVT::v4i16; 6876 } 6877 } 6878 6879 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total 6880 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. 6881 /// We insert the required extension here to get the vector to fill a D register. 6882 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, 6883 const EVT &OrigTy, 6884 const EVT &ExtTy, 6885 unsigned ExtOpcode) { 6886 // The vector originally had a size of OrigTy. It was then extended to ExtTy. 6887 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than 6888 // 64-bits we need to insert a new extension so that it will be 64-bits. 6889 assert(ExtTy.is128BitVector() && "Unexpected extension size"); 6890 if (OrigTy.getSizeInBits() >= 64) 6891 return N; 6892 6893 // Must extend size to at least 64 bits to be used as an operand for VMULL. 6894 EVT NewVT = getExtensionTo64Bits(OrigTy); 6895 6896 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N); 6897 } 6898 6899 /// SkipLoadExtensionForVMULL - return a load of the original vector size that 6900 /// does not do any sign/zero extension. If the original vector is less 6901 /// than 64 bits, an appropriate extension will be added after the load to 6902 /// reach a total size of 64 bits. We have to add the extension separately 6903 /// because ARM does not have a sign/zero extending load for vectors. 6904 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { 6905 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); 6906 6907 // The load already has the right type. 6908 if (ExtendedTy == LD->getMemoryVT()) 6909 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(), 6910 LD->getBasePtr(), LD->getPointerInfo(), 6911 LD->getAlignment(), LD->getMemOperand()->getFlags()); 6912 6913 // We need to create a zextload/sextload. We cannot just create a load 6914 // followed by a zext/zext node because LowerMUL is also run during normal 6915 // operation legalization where we can't create illegal types. 6916 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy, 6917 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), 6918 LD->getMemoryVT(), LD->getAlignment(), 6919 LD->getMemOperand()->getFlags()); 6920 } 6921 6922 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, 6923 /// extending load, or BUILD_VECTOR with extended elements, return the 6924 /// unextended value. 
The unextended vector should be 64 bits so that it can 6925 /// be used as an operand to a VMULL instruction. If the original vector size 6926 /// before extension is less than 64 bits we add a an extension to resize 6927 /// the vector to 64 bits. 6928 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { 6929 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 6930 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, 6931 N->getOperand(0)->getValueType(0), 6932 N->getValueType(0), 6933 N->getOpcode()); 6934 6935 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 6936 return SkipLoadExtensionForVMULL(LD, DAG); 6937 6938 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 6939 // have been legalized as a BITCAST from v4i32. 6940 if (N->getOpcode() == ISD::BITCAST) { 6941 SDNode *BVN = N->getOperand(0).getNode(); 6942 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 6943 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 6944 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; 6945 return DAG.getBuildVector( 6946 MVT::v2i32, SDLoc(N), 6947 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)}); 6948 } 6949 // Construct a new BUILD_VECTOR with elements truncated to half the size. 6950 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 6951 EVT VT = N->getValueType(0); 6952 unsigned EltSize = VT.getScalarSizeInBits() / 2; 6953 unsigned NumElts = VT.getVectorNumElements(); 6954 MVT TruncVT = MVT::getIntegerVT(EltSize); 6955 SmallVector<SDValue, 8> Ops; 6956 SDLoc dl(N); 6957 for (unsigned i = 0; i != NumElts; ++i) { 6958 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 6959 const APInt &CInt = C->getAPIntValue(); 6960 // Element types smaller than 32 bits are not legal, so use i32 elements. 6961 // The values are implicitly truncated so sext vs. zext doesn't matter. 6962 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32)); 6963 } 6964 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops); 6965 } 6966 6967 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 6968 unsigned Opcode = N->getOpcode(); 6969 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 6970 SDNode *N0 = N->getOperand(0).getNode(); 6971 SDNode *N1 = N->getOperand(1).getNode(); 6972 return N0->hasOneUse() && N1->hasOneUse() && 6973 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 6974 } 6975 return false; 6976 } 6977 6978 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 6979 unsigned Opcode = N->getOpcode(); 6980 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 6981 SDNode *N0 = N->getOperand(0).getNode(); 6982 SDNode *N1 = N->getOperand(1).getNode(); 6983 return N0->hasOneUse() && N1->hasOneUse() && 6984 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 6985 } 6986 return false; 6987 } 6988 6989 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 6990 // Multiplications are only custom-lowered for 128-bit vectors so that 6991 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
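// Typical pattern, for illustration:
//   (mul (sext v4i16 a to v4i32), (sext v4i16 b to v4i32))
// becomes a single ARMISD::VMULLs of the two 64-bit operands, producing the
// v4i32 product directly rather than widening both inputs first.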
6992 EVT VT = Op.getValueType(); 6993 assert(VT.is128BitVector() && VT.isInteger() && 6994 "unexpected type for custom-lowering ISD::MUL"); 6995 SDNode *N0 = Op.getOperand(0).getNode(); 6996 SDNode *N1 = Op.getOperand(1).getNode(); 6997 unsigned NewOpc = 0; 6998 bool isMLA = false; 6999 bool isN0SExt = isSignExtended(N0, DAG); 7000 bool isN1SExt = isSignExtended(N1, DAG); 7001 if (isN0SExt && isN1SExt) 7002 NewOpc = ARMISD::VMULLs; 7003 else { 7004 bool isN0ZExt = isZeroExtended(N0, DAG); 7005 bool isN1ZExt = isZeroExtended(N1, DAG); 7006 if (isN0ZExt && isN1ZExt) 7007 NewOpc = ARMISD::VMULLu; 7008 else if (isN1SExt || isN1ZExt) { 7009 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 7010 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 7011 if (isN1SExt && isAddSubSExt(N0, DAG)) { 7012 NewOpc = ARMISD::VMULLs; 7013 isMLA = true; 7014 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 7015 NewOpc = ARMISD::VMULLu; 7016 isMLA = true; 7017 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 7018 std::swap(N0, N1); 7019 NewOpc = ARMISD::VMULLu; 7020 isMLA = true; 7021 } 7022 } 7023 7024 if (!NewOpc) { 7025 if (VT == MVT::v2i64) 7026 // Fall through to expand this. It is not legal. 7027 return SDValue(); 7028 else 7029 // Other vector multiplications are legal. 7030 return Op; 7031 } 7032 } 7033 7034 // Legalize to a VMULL instruction. 7035 SDLoc DL(Op); 7036 SDValue Op0; 7037 SDValue Op1 = SkipExtensionForVMULL(N1, DAG); 7038 if (!isMLA) { 7039 Op0 = SkipExtensionForVMULL(N0, DAG); 7040 assert(Op0.getValueType().is64BitVector() && 7041 Op1.getValueType().is64BitVector() && 7042 "unexpected types for extended operands to VMULL"); 7043 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 7044 } 7045 7046 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 7047 // isel lowering to take advantage of no-stall back to back vmul + vmla. 7048 // vmull q0, d4, d6 7049 // vmlal q0, d5, d6 7050 // is faster than 7051 // vaddl q0, d4, d5 7052 // vmovl q1, d6 7053 // vmul q0, q0, q1 7054 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); 7055 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); 7056 EVT Op1VT = Op1.getValueType(); 7057 return DAG.getNode(N0->getOpcode(), DL, VT, 7058 DAG.getNode(NewOpc, DL, VT, 7059 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 7060 DAG.getNode(NewOpc, DL, VT, 7061 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 7062 } 7063 7064 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, 7065 SelectionDAG &DAG) { 7066 // TODO: Should this propagate fast-math-flags? 7067 7068 // Convert to float 7069 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 7070 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 7071 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 7072 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 7073 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 7074 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 7075 // Get reciprocal estimate. 7076 // float4 recip = vrecpeq_f32(yf); 7077 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 7078 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 7079 Y); 7080 // Because char has a smaller range than uchar, we can actually get away 7081 // without any newton steps. This requires that we use a weird bias 7082 // of 0xb000, however (again, this has been exhaustively tested). 
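// (Roughly: adding 0xb000 to the raw float bits bumps the magnitude of
//  xf*recip by a fixed number of ulps, so the truncating conversion below
//  lands on the exact quotient for every i8 input, per the exhaustive testing
//  mentioned above.)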
7083 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 7084 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 7085 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 7086 Y = DAG.getConstant(0xb000, dl, MVT::v4i32); 7087 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 7088 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 7089 // Convert back to short. 7090 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 7091 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 7092 return X; 7093 } 7094 7095 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, 7096 SelectionDAG &DAG) { 7097 // TODO: Should this propagate fast-math-flags? 7098 7099 SDValue N2; 7100 // Convert to float. 7101 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 7102 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 7103 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 7104 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 7105 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 7106 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 7107 7108 // Use reciprocal estimate and one refinement step. 7109 // float4 recip = vrecpeq_f32(yf); 7110 // recip *= vrecpsq_f32(yf, recip); 7111 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 7112 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 7113 N1); 7114 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 7115 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 7116 N1, N2); 7117 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 7118 // Because short has a smaller range than ushort, we can actually get away 7119 // with only a single newton step. This requires that we use a weird bias 7120 // of 89, however (again, this has been exhaustively tested). 7121 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 7122 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 7123 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 7124 N1 = DAG.getConstant(0x89, dl, MVT::v4i32); 7125 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 7126 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 7127 // Convert back to integer and return. 
7128 // return vmovn_s32(vcvt_s32_f32(result)); 7129 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 7130 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 7131 return N0; 7132 } 7133 7134 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 7135 EVT VT = Op.getValueType(); 7136 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 7137 "unexpected type for custom-lowering ISD::SDIV"); 7138 7139 SDLoc dl(Op); 7140 SDValue N0 = Op.getOperand(0); 7141 SDValue N1 = Op.getOperand(1); 7142 SDValue N2, N3; 7143 7144 if (VT == MVT::v8i8) { 7145 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 7146 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 7147 7148 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 7149 DAG.getIntPtrConstant(4, dl)); 7150 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 7151 DAG.getIntPtrConstant(4, dl)); 7152 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 7153 DAG.getIntPtrConstant(0, dl)); 7154 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 7155 DAG.getIntPtrConstant(0, dl)); 7156 7157 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 7158 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 7159 7160 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 7161 N0 = LowerCONCAT_VECTORS(N0, DAG); 7162 7163 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 7164 return N0; 7165 } 7166 return LowerSDIV_v4i16(N0, N1, dl, DAG); 7167 } 7168 7169 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 7170 // TODO: Should this propagate fast-math-flags? 7171 EVT VT = Op.getValueType(); 7172 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 7173 "unexpected type for custom-lowering ISD::UDIV"); 7174 7175 SDLoc dl(Op); 7176 SDValue N0 = Op.getOperand(0); 7177 SDValue N1 = Op.getOperand(1); 7178 SDValue N2, N3; 7179 7180 if (VT == MVT::v8i8) { 7181 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 7182 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 7183 7184 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 7185 DAG.getIntPtrConstant(4, dl)); 7186 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 7187 DAG.getIntPtrConstant(4, dl)); 7188 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 7189 DAG.getIntPtrConstant(0, dl)); 7190 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 7191 DAG.getIntPtrConstant(0, dl)); 7192 7193 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 7194 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 7195 7196 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 7197 N0 = LowerCONCAT_VECTORS(N0, DAG); 7198 7199 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 7200 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl, 7201 MVT::i32), 7202 N0); 7203 return N0; 7204 } 7205 7206 // v4i16 sdiv ... Convert to float. 7207 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 7208 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 7209 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 7210 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 7211 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 7212 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 7213 7214 // Use reciprocal estimate and two refinement steps. 
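// (Each vrecps step below is a Newton-Raphson iteration: vrecps(y, r) returns
//  2.0 - y*r, so "recip *= vrecps(yf, recip)" refines recip towards 1/yf,
//  roughly doubling the number of accurate bits per step.)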
7215 // float4 recip = vrecpeq_f32(yf); 7216 // recip *= vrecpsq_f32(yf, recip); 7217 // recip *= vrecpsq_f32(yf, recip); 7218 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 7219 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32), 7220 BN1); 7221 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 7222 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 7223 BN1, N2); 7224 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 7225 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 7226 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32), 7227 BN1, N2); 7228 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 7229 // Simply multiplying by the reciprocal estimate can leave us a few ulps 7230 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 7231 // and that it will never cause us to return an answer too large). 7232 // float4 result = as_float4(as_int4(xf*recip) + 2); 7233 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 7234 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 7235 N1 = DAG.getConstant(2, dl, MVT::v4i32); 7236 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 7237 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 7238 // Convert back to integer and return. 7239 // return vmovn_u32(vcvt_s32_f32(result)); 7240 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 7241 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 7242 return N0; 7243 } 7244 7245 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 7246 EVT VT = Op.getNode()->getValueType(0); 7247 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 7248 7249 unsigned Opc; 7250 bool ExtraOp = false; 7251 switch (Op.getOpcode()) { 7252 default: llvm_unreachable("Invalid code"); 7253 case ISD::ADDC: Opc = ARMISD::ADDC; break; 7254 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 7255 case ISD::SUBC: Opc = ARMISD::SUBC; break; 7256 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 7257 } 7258 7259 if (!ExtraOp) 7260 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 7261 Op.getOperand(1)); 7262 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 7263 Op.getOperand(1), Op.getOperand(2)); 7264 } 7265 7266 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 7267 assert(Subtarget->isTargetDarwin()); 7268 7269 // For iOS, we want to call an alternative entry point: __sincos_stret, 7270 // return values are passed via sret. 7271 SDLoc dl(Op); 7272 SDValue Arg = Op.getOperand(0); 7273 EVT ArgVT = Arg.getValueType(); 7274 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 7275 auto PtrVT = getPointerTy(DAG.getDataLayout()); 7276 7277 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 7278 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7279 7280 // Pair of floats / doubles used to pass the result. 7281 Type *RetTy = StructType::get(ArgTy, ArgTy, nullptr); 7282 auto &DL = DAG.getDataLayout(); 7283 7284 ArgListTy Args; 7285 bool ShouldUseSRet = Subtarget->isAPCS_ABI(); 7286 SDValue SRet; 7287 if (ShouldUseSRet) { 7288 // Create stack object for sret. 
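// (Sketch of the sret path: an {ArgTy, ArgTy} stack slot is created here, the
//  __sincos_stret call writes {sin, cos} into it, and both fields are loaded
//  back below, with cos read at offset ArgVT.getStoreSize().)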
7289 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy); 7290 const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy); 7291 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false); 7292 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL)); 7293 7294 ArgListEntry Entry; 7295 Entry.Node = SRet; 7296 Entry.Ty = RetTy->getPointerTo(); 7297 Entry.isSExt = false; 7298 Entry.isZExt = false; 7299 Entry.isSRet = true; 7300 Args.push_back(Entry); 7301 RetTy = Type::getVoidTy(*DAG.getContext()); 7302 } 7303 7304 ArgListEntry Entry; 7305 Entry.Node = Arg; 7306 Entry.Ty = ArgTy; 7307 Entry.isSExt = false; 7308 Entry.isZExt = false; 7309 Args.push_back(Entry); 7310 7311 const char *LibcallName = 7312 (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret"; 7313 RTLIB::Libcall LC = 7314 (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32; 7315 CallingConv::ID CC = getLibcallCallingConv(LC); 7316 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL)); 7317 7318 TargetLowering::CallLoweringInfo CLI(DAG); 7319 CLI.setDebugLoc(dl) 7320 .setChain(DAG.getEntryNode()) 7321 .setCallee(CC, RetTy, Callee, std::move(Args)) 7322 .setDiscardResult(ShouldUseSRet); 7323 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 7324 7325 if (!ShouldUseSRet) 7326 return CallResult.first; 7327 7328 SDValue LoadSin = 7329 DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo()); 7330 7331 // Address of cos field. 7332 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet, 7333 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl)); 7334 SDValue LoadCos = 7335 DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo()); 7336 7337 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 7338 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, 7339 LoadSin.getValue(0), LoadCos.getValue(0)); 7340 } 7341 7342 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, 7343 bool Signed, 7344 SDValue &Chain) const { 7345 EVT VT = Op.getValueType(); 7346 assert((VT == MVT::i32 || VT == MVT::i64) && 7347 "unexpected type for custom lowering DIV"); 7348 SDLoc dl(Op); 7349 7350 const auto &DL = DAG.getDataLayout(); 7351 const auto &TLI = DAG.getTargetLoweringInfo(); 7352 7353 const char *Name = nullptr; 7354 if (Signed) 7355 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64"; 7356 else 7357 Name = (VT == MVT::i32) ? 
"__rt_udiv" : "__rt_udiv64"; 7358 7359 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL)); 7360 7361 ARMTargetLowering::ArgListTy Args; 7362 7363 for (auto AI : {1, 0}) { 7364 ArgListEntry Arg; 7365 Arg.Node = Op.getOperand(AI); 7366 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext()); 7367 Args.push_back(Arg); 7368 } 7369 7370 CallLoweringInfo CLI(DAG); 7371 CLI.setDebugLoc(dl) 7372 .setChain(Chain) 7373 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()), 7374 ES, std::move(Args)); 7375 7376 return LowerCallTo(CLI).first; 7377 } 7378 7379 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, 7380 bool Signed) const { 7381 assert(Op.getValueType() == MVT::i32 && 7382 "unexpected type for custom lowering DIV"); 7383 SDLoc dl(Op); 7384 7385 SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, 7386 DAG.getEntryNode(), Op.getOperand(1)); 7387 7388 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); 7389 } 7390 7391 void ARMTargetLowering::ExpandDIV_Windows( 7392 SDValue Op, SelectionDAG &DAG, bool Signed, 7393 SmallVectorImpl<SDValue> &Results) const { 7394 const auto &DL = DAG.getDataLayout(); 7395 const auto &TLI = DAG.getTargetLoweringInfo(); 7396 7397 assert(Op.getValueType() == MVT::i64 && 7398 "unexpected type for custom lowering DIV"); 7399 SDLoc dl(Op); 7400 7401 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op.getOperand(1), 7402 DAG.getConstant(0, dl, MVT::i32)); 7403 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op.getOperand(1), 7404 DAG.getConstant(1, dl, MVT::i32)); 7405 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i32, Lo, Hi); 7406 7407 SDValue DBZCHK = 7408 DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other, DAG.getEntryNode(), Or); 7409 7410 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK); 7411 7412 SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result); 7413 SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result, 7414 DAG.getConstant(32, dl, TLI.getPointerTy(DL))); 7415 Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper); 7416 7417 Results.push_back(Lower); 7418 Results.push_back(Upper); 7419 } 7420 7421 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 7422 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering())) 7423 // Acquire/Release load/store is not legal for targets without a dmb or 7424 // equivalent available. 7425 return SDValue(); 7426 7427 // Monotonic load/store is legal for all targets. 
7428 return Op; 7429 } 7430 7431 static void ReplaceREADCYCLECOUNTER(SDNode *N, 7432 SmallVectorImpl<SDValue> &Results, 7433 SelectionDAG &DAG, 7434 const ARMSubtarget *Subtarget) { 7435 SDLoc DL(N); 7436 // Under Power Management extensions, the cycle-count is: 7437 // mrc p15, #0, <Rt>, c9, c13, #0 7438 SDValue Ops[] = { N->getOperand(0), // Chain 7439 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32), 7440 DAG.getConstant(15, DL, MVT::i32), 7441 DAG.getConstant(0, DL, MVT::i32), 7442 DAG.getConstant(9, DL, MVT::i32), 7443 DAG.getConstant(13, DL, MVT::i32), 7444 DAG.getConstant(0, DL, MVT::i32) 7445 }; 7446 7447 SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 7448 DAG.getVTList(MVT::i32, MVT::Other), Ops); 7449 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32, 7450 DAG.getConstant(0, DL, MVT::i32))); 7451 Results.push_back(Cycles32.getValue(1)); 7452 } 7453 7454 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { 7455 SDLoc dl(V.getNode()); 7456 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32); 7457 SDValue VHi = DAG.getAnyExtOrTrunc( 7458 DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)), 7459 dl, MVT::i32); 7460 SDValue RegClass = 7461 DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32); 7462 SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32); 7463 SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32); 7464 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; 7465 return SDValue( 7466 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); 7467 } 7468 7469 static void ReplaceCMP_SWAP_64Results(SDNode *N, 7470 SmallVectorImpl<SDValue> & Results, 7471 SelectionDAG &DAG) { 7472 assert(N->getValueType(0) == MVT::i64 && 7473 "AtomicCmpSwap on types less than 64 should be legal"); 7474 SDValue Ops[] = {N->getOperand(1), 7475 createGPRPairNode(DAG, N->getOperand(2)), 7476 createGPRPairNode(DAG, N->getOperand(3)), 7477 N->getOperand(0)}; 7478 SDNode *CmpSwap = DAG.getMachineNode( 7479 ARM::CMP_SWAP_64, SDLoc(N), 7480 DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops); 7481 7482 MachineFunction &MF = DAG.getMachineFunction(); 7483 MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); 7484 MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); 7485 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1); 7486 7487 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32, 7488 SDValue(CmpSwap, 0))); 7489 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32, 7490 SDValue(CmpSwap, 0))); 7491 Results.push_back(SDValue(CmpSwap, 2)); 7492 } 7493 7494 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7495 switch (Op.getOpcode()) { 7496 default: llvm_unreachable("Don't know how to custom lower this!"); 7497 case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG); 7498 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 7499 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 7500 case ISD::GlobalAddress: 7501 switch (Subtarget->getTargetTriple().getObjectFormat()) { 7502 default: llvm_unreachable("unknown object format"); 7503 case Triple::COFF: 7504 return LowerGlobalAddressWindows(Op, DAG); 7505 case Triple::ELF: 7506 return LowerGlobalAddressELF(Op, DAG); 7507 case Triple::MachO: 7508 return LowerGlobalAddressDarwin(Op, DAG); 7509 } 7510 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 7511 case ISD::SELECT: return LowerSELECT(Op, DAG); 7512 
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 7513 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 7514 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 7515 case ISD::VASTART: return LowerVASTART(Op, DAG); 7516 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 7517 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 7518 case ISD::SINT_TO_FP: 7519 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 7520 case ISD::FP_TO_SINT: 7521 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 7522 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 7523 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 7524 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 7525 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 7526 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 7527 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG); 7528 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 7529 Subtarget); 7530 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 7531 case ISD::SHL: 7532 case ISD::SRL: 7533 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 7534 case ISD::SREM: return LowerREM(Op.getNode(), DAG); 7535 case ISD::UREM: return LowerREM(Op.getNode(), DAG); 7536 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 7537 case ISD::SRL_PARTS: 7538 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 7539 case ISD::CTTZ: 7540 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 7541 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 7542 case ISD::SETCC: return LowerVSETCC(Op, DAG); 7543 case ISD::SETCCE: return LowerSETCCE(Op, DAG); 7544 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 7545 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 7546 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 7547 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 7548 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 7549 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 7550 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 7551 case ISD::MUL: return LowerMUL(Op, DAG); 7552 case ISD::SDIV: 7553 if (Subtarget->isTargetWindows()) 7554 return LowerDIV_Windows(Op, DAG, /* Signed */ true); 7555 return LowerSDIV(Op, DAG); 7556 case ISD::UDIV: 7557 if (Subtarget->isTargetWindows()) 7558 return LowerDIV_Windows(Op, DAG, /* Signed */ false); 7559 return LowerUDIV(Op, DAG); 7560 case ISD::ADDC: 7561 case ISD::ADDE: 7562 case ISD::SUBC: 7563 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 7564 case ISD::SADDO: 7565 case ISD::UADDO: 7566 case ISD::SSUBO: 7567 case ISD::USUBO: 7568 return LowerXALUO(Op, DAG); 7569 case ISD::ATOMIC_LOAD: 7570 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 7571 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 7572 case ISD::SDIVREM: 7573 case ISD::UDIVREM: return LowerDivRem(Op, DAG); 7574 case ISD::DYNAMIC_STACKALLOC: 7575 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment()) 7576 return LowerDYNAMIC_STACKALLOC(Op, DAG); 7577 llvm_unreachable("Don't know how to custom lower this!"); 7578 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG); 7579 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 7580 case ARMISD::WIN__DBZCHK: return SDValue(); 7581 } 7582 } 7583 7584 /// ReplaceNodeResults - Replace the results of node with an illegal result 7585 /// type with new values built 
out of custom code. 7586 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 7587 SmallVectorImpl<SDValue> &Results, 7588 SelectionDAG &DAG) const { 7589 SDValue Res; 7590 switch (N->getOpcode()) { 7591 default: 7592 llvm_unreachable("Don't know how to custom expand this!"); 7593 case ISD::READ_REGISTER: 7594 ExpandREAD_REGISTER(N, Results, DAG); 7595 break; 7596 case ISD::BITCAST: 7597 Res = ExpandBITCAST(N, DAG); 7598 break; 7599 case ISD::SRL: 7600 case ISD::SRA: 7601 Res = Expand64BitShift(N, DAG, Subtarget); 7602 break; 7603 case ISD::SREM: 7604 case ISD::UREM: 7605 Res = LowerREM(N, DAG); 7606 break; 7607 case ISD::SDIVREM: 7608 case ISD::UDIVREM: 7609 Res = LowerDivRem(SDValue(N, 0), DAG); 7610 assert(Res.getNumOperands() == 2 && "DivRem needs two values"); 7611 Results.push_back(Res.getValue(0)); 7612 Results.push_back(Res.getValue(1)); 7613 return; 7614 case ISD::READCYCLECOUNTER: 7615 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 7616 return; 7617 case ISD::UDIV: 7618 case ISD::SDIV: 7619 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows"); 7620 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV, 7621 Results); 7622 case ISD::ATOMIC_CMP_SWAP: 7623 ReplaceCMP_SWAP_64Results(N, Results, DAG); 7624 return; 7625 } 7626 if (Res.getNode()) 7627 Results.push_back(Res); 7628 } 7629 7630 //===----------------------------------------------------------------------===// 7631 // ARM Scheduler Hooks 7632 //===----------------------------------------------------------------------===// 7633 7634 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 7635 /// registers the function context. 7636 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, 7637 MachineBasicBlock *MBB, 7638 MachineBasicBlock *DispatchBB, 7639 int FI) const { 7640 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() && 7641 "ROPI/RWPI not currently supported with SjLj"); 7642 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 7643 DebugLoc dl = MI.getDebugLoc(); 7644 MachineFunction *MF = MBB->getParent(); 7645 MachineRegisterInfo *MRI = &MF->getRegInfo(); 7646 MachineConstantPool *MCP = MF->getConstantPool(); 7647 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 7648 const Function *F = MF->getFunction(); 7649 7650 bool isThumb = Subtarget->isThumb(); 7651 bool isThumb2 = Subtarget->isThumb2(); 7652 7653 unsigned PCLabelId = AFI->createPICLabelUId(); 7654 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 7655 ARMConstantPoolValue *CPV = 7656 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 7657 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 7658 7659 const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass 7660 : &ARM::GPRRegClass; 7661 7662 // Grab constant pool and fixed stack memory operands. 7663 MachineMemOperand *CPMMO = 7664 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF), 7665 MachineMemOperand::MOLoad, 4, 4); 7666 7667 MachineMemOperand *FIMMOSt = 7668 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI), 7669 MachineMemOperand::MOStore, 4, 4); 7670 7671 // Load the address of the dispatch MBB into the jump buffer. 
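// Three variants follow (Thumb-2, Thumb-1 and ARM); each materialises the
// dispatch block's address PC-relatively via the constant-pool entry created
// above and stores it into the jump-buffer slot of the function context (the
// field labelled &jbuf[1] below). The Thumb variants also set the low bit of
// the address so that a later indirect branch stays in Thumb state.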
7672 if (isThumb2) { 7673 // Incoming value: jbuf 7674 // ldr.n r5, LCPI1_1 7675 // orr r5, r5, #1 7676 // add r5, pc 7677 // str r5, [$jbuf, #+4] ; &jbuf[1] 7678 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7679 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 7680 .addConstantPoolIndex(CPI) 7681 .addMemOperand(CPMMO)); 7682 // Set the low bit because of thumb mode. 7683 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7684 AddDefaultCC( 7685 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 7686 .addReg(NewVReg1, RegState::Kill) 7687 .addImm(0x01))); 7688 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7689 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 7690 .addReg(NewVReg2, RegState::Kill) 7691 .addImm(PCLabelId); 7692 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 7693 .addReg(NewVReg3, RegState::Kill) 7694 .addFrameIndex(FI) 7695 .addImm(36) // &jbuf[1] :: pc 7696 .addMemOperand(FIMMOSt)); 7697 } else if (isThumb) { 7698 // Incoming value: jbuf 7699 // ldr.n r1, LCPI1_4 7700 // add r1, pc 7701 // mov r2, #1 7702 // orrs r1, r2 7703 // add r2, $jbuf, #+4 ; &jbuf[1] 7704 // str r1, [r2] 7705 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7706 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 7707 .addConstantPoolIndex(CPI) 7708 .addMemOperand(CPMMO)); 7709 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7710 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 7711 .addReg(NewVReg1, RegState::Kill) 7712 .addImm(PCLabelId); 7713 // Set the low bit because of thumb mode. 7714 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7715 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 7716 .addReg(ARM::CPSR, RegState::Define) 7717 .addImm(1)); 7718 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7719 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 7720 .addReg(ARM::CPSR, RegState::Define) 7721 .addReg(NewVReg2, RegState::Kill) 7722 .addReg(NewVReg3, RegState::Kill)); 7723 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7724 BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5) 7725 .addFrameIndex(FI) 7726 .addImm(36); // &jbuf[1] :: pc 7727 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 7728 .addReg(NewVReg4, RegState::Kill) 7729 .addReg(NewVReg5, RegState::Kill) 7730 .addImm(0) 7731 .addMemOperand(FIMMOSt)); 7732 } else { 7733 // Incoming value: jbuf 7734 // ldr r1, LCPI1_1 7735 // add r1, pc, r1 7736 // str r1, [$jbuf, #+4] ; &jbuf[1] 7737 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7738 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 7739 .addConstantPoolIndex(CPI) 7740 .addImm(0) 7741 .addMemOperand(CPMMO)); 7742 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7743 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 7744 .addReg(NewVReg1, RegState::Kill) 7745 .addImm(PCLabelId)); 7746 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 7747 .addReg(NewVReg2, RegState::Kill) 7748 .addFrameIndex(FI) 7749 .addImm(36) // &jbuf[1] :: pc 7750 .addMemOperand(FIMMOSt)); 7751 } 7752 } 7753 7754 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, 7755 MachineBasicBlock *MBB) const { 7756 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 7757 DebugLoc dl = MI.getDebugLoc(); 7758 MachineFunction *MF = MBB->getParent(); 7759 MachineRegisterInfo *MRI = &MF->getRegInfo(); 7760 MachineFrameInfo &MFI = MF->getFrameInfo(); 7761 int FI = 
MFI.getFunctionContextIndex(); 7762 7763 const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass 7764 : &ARM::GPRnopcRegClass; 7765 7766 // Get a mapping of the call site numbers to all of the landing pads they're 7767 // associated with. 7768 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 7769 unsigned MaxCSNum = 0; 7770 MachineModuleInfo &MMI = MF->getMMI(); 7771 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 7772 ++BB) { 7773 if (!BB->isEHPad()) continue; 7774 7775 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 7776 // pad. 7777 for (MachineBasicBlock::iterator 7778 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 7779 if (!II->isEHLabel()) continue; 7780 7781 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 7782 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 7783 7784 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 7785 for (SmallVectorImpl<unsigned>::iterator 7786 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 7787 CSI != CSE; ++CSI) { 7788 CallSiteNumToLPad[*CSI].push_back(&*BB); 7789 MaxCSNum = std::max(MaxCSNum, *CSI); 7790 } 7791 break; 7792 } 7793 } 7794 7795 // Get an ordered list of the machine basic blocks for the jump table. 7796 std::vector<MachineBasicBlock*> LPadList; 7797 SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs; 7798 LPadList.reserve(CallSiteNumToLPad.size()); 7799 for (unsigned I = 1; I <= MaxCSNum; ++I) { 7800 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 7801 for (SmallVectorImpl<MachineBasicBlock*>::iterator 7802 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 7803 LPadList.push_back(*II); 7804 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 7805 } 7806 } 7807 7808 assert(!LPadList.empty() && 7809 "No landing pad destinations for the dispatch jump table!"); 7810 7811 // Create the jump table and associated information. 7812 MachineJumpTableInfo *JTI = 7813 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 7814 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 7815 7816 // Create the MBBs for the dispatch code. 7817 7818 // Shove the dispatch's address into the return slot in the function context. 7819 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 7820 DispatchBB->setIsEHPad(); 7821 7822 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 7823 unsigned trap_opcode; 7824 if (Subtarget->isThumb()) 7825 trap_opcode = ARM::tTRAP; 7826 else 7827 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; 7828 7829 BuildMI(TrapBB, dl, TII->get(trap_opcode)); 7830 DispatchBB->addSuccessor(TrapBB); 7831 7832 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 7833 DispatchBB->addSuccessor(DispContBB); 7834 7835 // Insert and MBBs. 7836 MF->insert(MF->end(), DispatchBB); 7837 MF->insert(MF->end(), DispContBB); 7838 MF->insert(MF->end(), TrapBB); 7839 7840 // Insert code into the entry block that creates and registers the function 7841 // context. 
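// Once the entry block is set up, the remainder of this function fills in the
// dispatch code itself: load the call-site index stored in the function
// context, compare it against the number of landing pads (branching to TrapBB
// when it is out of range), and then dispatch through the inline jump table
// created above.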
7842 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 7843 7844 MachineMemOperand *FIMMOLd = MF->getMachineMemOperand( 7845 MachinePointerInfo::getFixedStack(*MF, FI), 7846 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); 7847 7848 MachineInstrBuilder MIB; 7849 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 7850 7851 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 7852 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 7853 7854 // Add a register mask with no preserved registers. This results in all 7855 // registers being marked as clobbered. 7856 MIB.addRegMask(RI.getNoPreservedMask()); 7857 7858 bool IsPositionIndependent = isPositionIndependent(); 7859 unsigned NumLPads = LPadList.size(); 7860 if (Subtarget->isThumb2()) { 7861 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7862 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 7863 .addFrameIndex(FI) 7864 .addImm(4) 7865 .addMemOperand(FIMMOLd)); 7866 7867 if (NumLPads < 256) { 7868 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 7869 .addReg(NewVReg1) 7870 .addImm(LPadList.size())); 7871 } else { 7872 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7873 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 7874 .addImm(NumLPads & 0xFFFF)); 7875 7876 unsigned VReg2 = VReg1; 7877 if ((NumLPads & 0xFFFF0000) != 0) { 7878 VReg2 = MRI->createVirtualRegister(TRC); 7879 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 7880 .addReg(VReg1) 7881 .addImm(NumLPads >> 16)); 7882 } 7883 7884 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 7885 .addReg(NewVReg1) 7886 .addReg(VReg2)); 7887 } 7888 7889 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 7890 .addMBB(TrapBB) 7891 .addImm(ARMCC::HI) 7892 .addReg(ARM::CPSR); 7893 7894 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7895 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 7896 .addJumpTableIndex(MJTI)); 7897 7898 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7899 AddDefaultCC( 7900 AddDefaultPred( 7901 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 7902 .addReg(NewVReg3, RegState::Kill) 7903 .addReg(NewVReg1) 7904 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 7905 7906 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 7907 .addReg(NewVReg4, RegState::Kill) 7908 .addReg(NewVReg1) 7909 .addJumpTableIndex(MJTI); 7910 } else if (Subtarget->isThumb()) { 7911 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7912 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 7913 .addFrameIndex(FI) 7914 .addImm(1) 7915 .addMemOperand(FIMMOLd)); 7916 7917 if (NumLPads < 256) { 7918 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 7919 .addReg(NewVReg1) 7920 .addImm(NumLPads)); 7921 } else { 7922 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7923 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7924 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7925 7926 // MachineConstantPool wants an explicit alignment. 
7927 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 7928 if (Align == 0) 7929 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 7930 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7931 7932 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7933 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 7934 .addReg(VReg1, RegState::Define) 7935 .addConstantPoolIndex(Idx)); 7936 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 7937 .addReg(NewVReg1) 7938 .addReg(VReg1)); 7939 } 7940 7941 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 7942 .addMBB(TrapBB) 7943 .addImm(ARMCC::HI) 7944 .addReg(ARM::CPSR); 7945 7946 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7947 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 7948 .addReg(ARM::CPSR, RegState::Define) 7949 .addReg(NewVReg1) 7950 .addImm(2)); 7951 7952 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7953 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 7954 .addJumpTableIndex(MJTI)); 7955 7956 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7957 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 7958 .addReg(ARM::CPSR, RegState::Define) 7959 .addReg(NewVReg2, RegState::Kill) 7960 .addReg(NewVReg3)); 7961 7962 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( 7963 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); 7964 7965 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7966 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 7967 .addReg(NewVReg4, RegState::Kill) 7968 .addImm(0) 7969 .addMemOperand(JTMMOLd)); 7970 7971 unsigned NewVReg6 = NewVReg5; 7972 if (IsPositionIndependent) { 7973 NewVReg6 = MRI->createVirtualRegister(TRC); 7974 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 7975 .addReg(ARM::CPSR, RegState::Define) 7976 .addReg(NewVReg5, RegState::Kill) 7977 .addReg(NewVReg3)); 7978 } 7979 7980 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 7981 .addReg(NewVReg6, RegState::Kill) 7982 .addJumpTableIndex(MJTI); 7983 } else { 7984 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7985 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 7986 .addFrameIndex(FI) 7987 .addImm(4) 7988 .addMemOperand(FIMMOLd)); 7989 7990 if (NumLPads < 256) { 7991 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 7992 .addReg(NewVReg1) 7993 .addImm(NumLPads)); 7994 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 7995 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7996 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 7997 .addImm(NumLPads & 0xFFFF)); 7998 7999 unsigned VReg2 = VReg1; 8000 if ((NumLPads & 0xFFFF0000) != 0) { 8001 VReg2 = MRI->createVirtualRegister(TRC); 8002 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 8003 .addReg(VReg1) 8004 .addImm(NumLPads >> 16)); 8005 } 8006 8007 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 8008 .addReg(NewVReg1) 8009 .addReg(VReg2)); 8010 } else { 8011 MachineConstantPool *ConstantPool = MF->getConstantPool(); 8012 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 8013 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 8014 8015 // MachineConstantPool wants an explicit alignment. 
8016 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty); 8017 if (Align == 0) 8018 Align = MF->getDataLayout().getTypeAllocSize(C->getType()); 8019 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 8020 8021 unsigned VReg1 = MRI->createVirtualRegister(TRC); 8022 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 8023 .addReg(VReg1, RegState::Define) 8024 .addConstantPoolIndex(Idx) 8025 .addImm(0)); 8026 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 8027 .addReg(NewVReg1) 8028 .addReg(VReg1, RegState::Kill)); 8029 } 8030 8031 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 8032 .addMBB(TrapBB) 8033 .addImm(ARMCC::HI) 8034 .addReg(ARM::CPSR); 8035 8036 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 8037 AddDefaultCC( 8038 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 8039 .addReg(NewVReg1) 8040 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 8041 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 8042 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 8043 .addJumpTableIndex(MJTI)); 8044 8045 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand( 8046 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4); 8047 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 8048 AddDefaultPred( 8049 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 8050 .addReg(NewVReg3, RegState::Kill) 8051 .addReg(NewVReg4) 8052 .addImm(0) 8053 .addMemOperand(JTMMOLd)); 8054 8055 if (IsPositionIndependent) { 8056 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 8057 .addReg(NewVReg5, RegState::Kill) 8058 .addReg(NewVReg4) 8059 .addJumpTableIndex(MJTI); 8060 } else { 8061 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 8062 .addReg(NewVReg5, RegState::Kill) 8063 .addJumpTableIndex(MJTI); 8064 } 8065 } 8066 8067 // Add the jump table entries as successors to the MBB. 8068 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 8069 for (std::vector<MachineBasicBlock*>::iterator 8070 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 8071 MachineBasicBlock *CurMBB = *I; 8072 if (SeenMBBs.insert(CurMBB).second) 8073 DispContBB->addSuccessor(CurMBB); 8074 } 8075 8076 // N.B. the order the invoke BBs are processed in doesn't matter here. 8077 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); 8078 SmallVector<MachineBasicBlock*, 64> MBBLPads; 8079 for (MachineBasicBlock *BB : InvokeBBs) { 8080 8081 // Remove the landing pad successor from the invoke block and replace it 8082 // with the new dispatch block. 8083 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 8084 BB->succ_end()); 8085 while (!Successors.empty()) { 8086 MachineBasicBlock *SMBB = Successors.pop_back_val(); 8087 if (SMBB->isEHPad()) { 8088 BB->removeSuccessor(SMBB); 8089 MBBLPads.push_back(SMBB); 8090 } 8091 } 8092 8093 BB->addSuccessor(DispatchBB, BranchProbability::getZero()); 8094 BB->normalizeSuccProbs(); 8095 8096 // Find the invoke call and mark all of the callee-saved registers as 8097 // 'implicit defined' so that they're spilled. This prevents code from 8098 // moving instructions to before the EH block, where they will never be 8099 // executed. 
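// (The loop below walks the invoke block backwards to find the call, records
// the registers the call already defines, and adds the remaining callee-saved
// registers as dead implicit defs, restricted to the register classes that
// are usable in the current ARM/Thumb mode.)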
8100 for (MachineBasicBlock::reverse_iterator 8101 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 8102 if (!II->isCall()) continue; 8103 8104 DenseMap<unsigned, bool> DefRegs; 8105 for (MachineInstr::mop_iterator 8106 OI = II->operands_begin(), OE = II->operands_end(); 8107 OI != OE; ++OI) { 8108 if (!OI->isReg()) continue; 8109 DefRegs[OI->getReg()] = true; 8110 } 8111 8112 MachineInstrBuilder MIB(*MF, &*II); 8113 8114 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 8115 unsigned Reg = SavedRegs[i]; 8116 if (Subtarget->isThumb2() && 8117 !ARM::tGPRRegClass.contains(Reg) && 8118 !ARM::hGPRRegClass.contains(Reg)) 8119 continue; 8120 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 8121 continue; 8122 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 8123 continue; 8124 if (!DefRegs[Reg]) 8125 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 8126 } 8127 8128 break; 8129 } 8130 } 8131 8132 // Mark all former landing pads as non-landing pads. The dispatch is the only 8133 // landing pad now. 8134 for (SmallVectorImpl<MachineBasicBlock*>::iterator 8135 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 8136 (*I)->setIsEHPad(false); 8137 8138 // The instruction is gone now. 8139 MI.eraseFromParent(); 8140 } 8141 8142 static 8143 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 8144 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 8145 E = MBB->succ_end(); I != E; ++I) 8146 if (*I != Succ) 8147 return *I; 8148 llvm_unreachable("Expecting a BB with two successors!"); 8149 } 8150 8151 /// Return the load opcode for a given load size. If load size >= 8, 8152 /// neon opcode will be returned. 8153 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { 8154 if (LdSize >= 8) 8155 return LdSize == 16 ? ARM::VLD1q32wb_fixed 8156 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; 8157 if (IsThumb1) 8158 return LdSize == 4 ? ARM::tLDRi 8159 : LdSize == 2 ? ARM::tLDRHi 8160 : LdSize == 1 ? ARM::tLDRBi : 0; 8161 if (IsThumb2) 8162 return LdSize == 4 ? ARM::t2LDR_POST 8163 : LdSize == 2 ? ARM::t2LDRH_POST 8164 : LdSize == 1 ? ARM::t2LDRB_POST : 0; 8165 return LdSize == 4 ? ARM::LDR_POST_IMM 8166 : LdSize == 2 ? ARM::LDRH_POST 8167 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; 8168 } 8169 8170 /// Return the store opcode for a given store size. If store size >= 8, 8171 /// neon opcode will be returned. 8172 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { 8173 if (StSize >= 8) 8174 return StSize == 16 ? ARM::VST1q32wb_fixed 8175 : StSize == 8 ? ARM::VST1d32wb_fixed : 0; 8176 if (IsThumb1) 8177 return StSize == 4 ? ARM::tSTRi 8178 : StSize == 2 ? ARM::tSTRHi 8179 : StSize == 1 ? ARM::tSTRBi : 0; 8180 if (IsThumb2) 8181 return StSize == 4 ? ARM::t2STR_POST 8182 : StSize == 2 ? ARM::t2STRH_POST 8183 : StSize == 1 ? ARM::t2STRB_POST : 0; 8184 return StSize == 4 ? ARM::STR_POST_IMM 8185 : StSize == 2 ? ARM::STRH_POST 8186 : StSize == 1 ? ARM::STRB_POST_IMM : 0; 8187 } 8188 8189 /// Emit a post-increment load operation with given size. The instructions 8190 /// will be added to BB at Pos. 
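/// Thumb-1 has no post-indexed load forms, so in that case a plain load is
/// emitted followed by a separate tADDi8 that advances the address register.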
8191 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, 8192 const TargetInstrInfo *TII, const DebugLoc &dl, 8193 unsigned LdSize, unsigned Data, unsigned AddrIn, 8194 unsigned AddrOut, bool IsThumb1, bool IsThumb2) { 8195 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2); 8196 assert(LdOpc != 0 && "Should have a load opcode"); 8197 if (LdSize >= 8) { 8198 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 8199 .addReg(AddrOut, RegState::Define).addReg(AddrIn) 8200 .addImm(0)); 8201 } else if (IsThumb1) { 8202 // load + update AddrIn 8203 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 8204 .addReg(AddrIn).addImm(0)); 8205 MachineInstrBuilder MIB = 8206 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut); 8207 MIB = AddDefaultT1CC(MIB); 8208 MIB.addReg(AddrIn).addImm(LdSize); 8209 AddDefaultPred(MIB); 8210 } else if (IsThumb2) { 8211 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 8212 .addReg(AddrOut, RegState::Define).addReg(AddrIn) 8213 .addImm(LdSize)); 8214 } else { // arm 8215 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data) 8216 .addReg(AddrOut, RegState::Define).addReg(AddrIn) 8217 .addReg(0).addImm(LdSize)); 8218 } 8219 } 8220 8221 /// Emit a post-increment store operation with given size. The instructions 8222 /// will be added to BB at Pos. 8223 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, 8224 const TargetInstrInfo *TII, const DebugLoc &dl, 8225 unsigned StSize, unsigned Data, unsigned AddrIn, 8226 unsigned AddrOut, bool IsThumb1, bool IsThumb2) { 8227 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2); 8228 assert(StOpc != 0 && "Should have a store opcode"); 8229 if (StSize >= 8) { 8230 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 8231 .addReg(AddrIn).addImm(0).addReg(Data)); 8232 } else if (IsThumb1) { 8233 // store + update AddrIn 8234 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data) 8235 .addReg(AddrIn).addImm(0)); 8236 MachineInstrBuilder MIB = 8237 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut); 8238 MIB = AddDefaultT1CC(MIB); 8239 MIB.addReg(AddrIn).addImm(StSize); 8240 AddDefaultPred(MIB); 8241 } else if (IsThumb2) { 8242 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 8243 .addReg(Data).addReg(AddrIn).addImm(StSize)); 8244 } else { // arm 8245 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut) 8246 .addReg(Data).addReg(AddrIn).addReg(0) 8247 .addImm(StSize)); 8248 } 8249 } 8250 8251 MachineBasicBlock * 8252 ARMTargetLowering::EmitStructByval(MachineInstr &MI, 8253 MachineBasicBlock *BB) const { 8254 // This pseudo instruction has 3 operands: dst, src, size 8255 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). 8256 // Otherwise, we will generate unrolled scalar copies. 
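// The alignment operand (operand 3) selects the copy unit: with NEON
// available and 16- or 8-byte alignment, whole q/d registers are moved via
// VLD1/VST1; otherwise 4-, 2- or 1-byte scalar units are used, and any
// remainder (SizeVal % UnitSize) is copied one byte at a time.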
8257 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 8258 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8259 MachineFunction::iterator It = ++BB->getIterator(); 8260 8261 unsigned dest = MI.getOperand(0).getReg(); 8262 unsigned src = MI.getOperand(1).getReg(); 8263 unsigned SizeVal = MI.getOperand(2).getImm(); 8264 unsigned Align = MI.getOperand(3).getImm(); 8265 DebugLoc dl = MI.getDebugLoc(); 8266 8267 MachineFunction *MF = BB->getParent(); 8268 MachineRegisterInfo &MRI = MF->getRegInfo(); 8269 unsigned UnitSize = 0; 8270 const TargetRegisterClass *TRC = nullptr; 8271 const TargetRegisterClass *VecTRC = nullptr; 8272 8273 bool IsThumb1 = Subtarget->isThumb1Only(); 8274 bool IsThumb2 = Subtarget->isThumb2(); 8275 bool IsThumb = Subtarget->isThumb(); 8276 8277 if (Align & 1) { 8278 UnitSize = 1; 8279 } else if (Align & 2) { 8280 UnitSize = 2; 8281 } else { 8282 // Check whether we can use NEON instructions. 8283 if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) && 8284 Subtarget->hasNEON()) { 8285 if ((Align % 16 == 0) && SizeVal >= 16) 8286 UnitSize = 16; 8287 else if ((Align % 8 == 0) && SizeVal >= 8) 8288 UnitSize = 8; 8289 } 8290 // Can't use NEON instructions. 8291 if (UnitSize == 0) 8292 UnitSize = 4; 8293 } 8294 8295 // Select the correct opcode and register class for unit size load/store 8296 bool IsNeon = UnitSize >= 8; 8297 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass; 8298 if (IsNeon) 8299 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass 8300 : UnitSize == 8 ? &ARM::DPRRegClass 8301 : nullptr; 8302 8303 unsigned BytesLeft = SizeVal % UnitSize; 8304 unsigned LoopSize = SizeVal - BytesLeft; 8305 8306 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 8307 // Use LDR and STR to copy. 8308 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 8309 // [destOut] = STR_POST(scratch, destIn, UnitSize) 8310 unsigned srcIn = src; 8311 unsigned destIn = dest; 8312 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 8313 unsigned srcOut = MRI.createVirtualRegister(TRC); 8314 unsigned destOut = MRI.createVirtualRegister(TRC); 8315 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); 8316 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, 8317 IsThumb1, IsThumb2); 8318 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, 8319 IsThumb1, IsThumb2); 8320 srcIn = srcOut; 8321 destIn = destOut; 8322 } 8323 8324 // Handle the leftover bytes with LDRB and STRB. 8325 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 8326 // [destOut] = STRB_POST(scratch, destIn, 1) 8327 for (unsigned i = 0; i < BytesLeft; i++) { 8328 unsigned srcOut = MRI.createVirtualRegister(TRC); 8329 unsigned destOut = MRI.createVirtualRegister(TRC); 8330 unsigned scratch = MRI.createVirtualRegister(TRC); 8331 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, 8332 IsThumb1, IsThumb2); 8333 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, 8334 IsThumb1, IsThumb2); 8335 srcIn = srcOut; 8336 destIn = destOut; 8337 } 8338 MI.eraseFromParent(); // The instruction is gone now. 8339 return BB; 8340 } 8341 8342 // Expand the pseudo op to a loop. 8343 // thisMBB: 8344 // ... 
8345 // movw varEnd, # --> with thumb2 8346 // movt varEnd, # 8347 // ldrcp varEnd, idx --> without thumb2 8348 // fallthrough --> loopMBB 8349 // loopMBB: 8350 // PHI varPhi, varEnd, varLoop 8351 // PHI srcPhi, src, srcLoop 8352 // PHI destPhi, dst, destLoop 8353 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 8354 // [destLoop] = STR_POST(scratch, destPhi, UnitSize) 8355 // subs varLoop, varPhi, #UnitSize 8356 // bne loopMBB 8357 // fallthrough --> exitMBB 8358 // exitMBB: 8359 // epilogue to handle left-over bytes 8360 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 8361 // [destOut] = STRB_POST(scratch, destLoop, 1) 8362 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 8363 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 8364 MF->insert(It, loopMBB); 8365 MF->insert(It, exitMBB); 8366 8367 // Transfer the remainder of BB and its successor edges to exitMBB. 8368 exitMBB->splice(exitMBB->begin(), BB, 8369 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8370 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 8371 8372 // Load an immediate to varEnd. 8373 unsigned varEnd = MRI.createVirtualRegister(TRC); 8374 if (Subtarget->useMovt(*MF)) { 8375 unsigned Vtmp = varEnd; 8376 if ((LoopSize & 0xFFFF0000) != 0) 8377 Vtmp = MRI.createVirtualRegister(TRC); 8378 AddDefaultPred(BuildMI(BB, dl, 8379 TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), 8380 Vtmp).addImm(LoopSize & 0xFFFF)); 8381 8382 if ((LoopSize & 0xFFFF0000) != 0) 8383 AddDefaultPred(BuildMI(BB, dl, 8384 TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), 8385 varEnd) 8386 .addReg(Vtmp) 8387 .addImm(LoopSize >> 16)); 8388 } else { 8389 MachineConstantPool *ConstantPool = MF->getConstantPool(); 8390 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 8391 const Constant *C = ConstantInt::get(Int32Ty, LoopSize); 8392 8393 // MachineConstantPool wants an explicit alignment. 
8394 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8395 if (Align == 0)
8396 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8397 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8398
8399 if (IsThumb)
8400 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
8401 varEnd, RegState::Define).addConstantPoolIndex(Idx));
8402 else
8403 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
8404 varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
8405 }
8406 BB->addSuccessor(loopMBB);
8407
8408 // Generate the loop body:
8409 // varPhi = PHI(varLoop, varEnd)
8410 // srcPhi = PHI(srcLoop, src)
8411 // destPhi = PHI(destLoop, dst)
8412 MachineBasicBlock *entryBB = BB;
8413 BB = loopMBB;
8414 unsigned varLoop = MRI.createVirtualRegister(TRC);
8415 unsigned varPhi = MRI.createVirtualRegister(TRC);
8416 unsigned srcLoop = MRI.createVirtualRegister(TRC);
8417 unsigned srcPhi = MRI.createVirtualRegister(TRC);
8418 unsigned destLoop = MRI.createVirtualRegister(TRC);
8419 unsigned destPhi = MRI.createVirtualRegister(TRC);
8420
8421 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
8422 .addReg(varLoop).addMBB(loopMBB)
8423 .addReg(varEnd).addMBB(entryBB);
8424 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
8425 .addReg(srcLoop).addMBB(loopMBB)
8426 .addReg(src).addMBB(entryBB);
8427 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
8428 .addReg(destLoop).addMBB(loopMBB)
8429 .addReg(dest).addMBB(entryBB);
8430
8431 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8432 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8433 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8434 emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
8435 IsThumb1, IsThumb2);
8436 emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
8437 IsThumb1, IsThumb2);
8438
8439 // Decrement loop variable by UnitSize.
8440 if (IsThumb1) {
8441 MachineInstrBuilder MIB =
8442 BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
8443 MIB = AddDefaultT1CC(MIB);
8444 MIB.addReg(varPhi).addImm(UnitSize);
8445 AddDefaultPred(MIB);
8446 } else {
8447 MachineInstrBuilder MIB =
8448 BuildMI(*BB, BB->end(), dl,
8449 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
8450 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
8451 MIB->getOperand(5).setReg(ARM::CPSR);
8452 MIB->getOperand(5).setIsDef(true);
8453 }
8454 BuildMI(*BB, BB->end(), dl,
8455 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
8456 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
8457
8458 // loopMBB can loop back to loopMBB or fall through to exitMBB.
8459 BB->addSuccessor(loopMBB);
8460 BB->addSuccessor(exitMBB);
8461
8462 // Add epilogue to handle BytesLeft.
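// (The epilogue goes at the head of exitMBB and copies the remaining
// SizeVal % UnitSize bytes one at a time with post-incremented byte
// loads/stores, mirroring the small-copy path above.)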
8463 BB = exitMBB;
8464 auto StartOfExit = exitMBB->begin();
8465
8466 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8467 // [destOut] = STRB_POST(scratch, destLoop, 1)
8468 unsigned srcIn = srcLoop;
8469 unsigned destIn = destLoop;
8470 for (unsigned i = 0; i < BytesLeft; i++) {
8471 unsigned srcOut = MRI.createVirtualRegister(TRC);
8472 unsigned destOut = MRI.createVirtualRegister(TRC);
8473 unsigned scratch = MRI.createVirtualRegister(TRC);
8474 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
8475 IsThumb1, IsThumb2);
8476 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
8477 IsThumb1, IsThumb2);
8478 srcIn = srcOut;
8479 destIn = destOut;
8480 }
8481
8482 MI.eraseFromParent(); // The instruction is gone now.
8483 return BB;
8484 }
8485
8486 MachineBasicBlock *
8487 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
8488 MachineBasicBlock *MBB) const {
8489 const TargetMachine &TM = getTargetMachine();
8490 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
8491 DebugLoc DL = MI.getDebugLoc();
8492
8493 assert(Subtarget->isTargetWindows() &&
8494 "__chkstk is only supported on Windows");
8495 assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
8496
8497 // __chkstk takes the number of words to allocate on the stack in R4, and
8498 // returns the stack adjustment in number of bytes in R4. This will not
8499 // clobber any other registers (other than the obvious lr).
8500 //
8501 // Although, technically, IP should be considered a register which may be
8502 // clobbered, the call itself will not touch it. Windows on ARM is a pure
8503 // thumb-2 environment, so there is no interworking required. As a result, we
8504 // do not expect a veneer to be emitted by the linker, clobbering IP.
8505 //
8506 // Each module receives its own copy of __chkstk, so no import thunk is
8507 // required, again, ensuring that IP is not clobbered.
8508 //
8509 // Finally, although some linkers may theoretically provide a trampoline for
8510 // out of range calls (which are quite common due to a 32M range limitation of
8511 // branches for Thumb), we can generate the long-call version via
8512 // -mcmodel=large, alleviating the need for the trampoline which may clobber
8513 // IP.
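//
// The emitted sequence is therefore, roughly: for the default code models a
// direct "bl __chkstk" followed by "sub.w sp, sp, r4"; for the large/JIT code
// models the callee address is first materialised with a MOVW/MOVT pair and
// the call is made through BLX.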
8514 8515 switch (TM.getCodeModel()) { 8516 case CodeModel::Small: 8517 case CodeModel::Medium: 8518 case CodeModel::Default: 8519 case CodeModel::Kernel: 8520 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) 8521 .addImm((unsigned)ARMCC::AL).addReg(0) 8522 .addExternalSymbol("__chkstk") 8523 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 8524 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 8525 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead); 8526 break; 8527 case CodeModel::Large: 8528 case CodeModel::JITDefault: { 8529 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 8530 unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 8531 8532 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) 8533 .addExternalSymbol("__chkstk"); 8534 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) 8535 .addImm((unsigned)ARMCC::AL).addReg(0) 8536 .addReg(Reg, RegState::Kill) 8537 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 8538 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 8539 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead); 8540 break; 8541 } 8542 } 8543 8544 AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), 8545 ARM::SP) 8546 .addReg(ARM::SP, RegState::Kill) 8547 .addReg(ARM::R4, RegState::Kill) 8548 .setMIFlags(MachineInstr::FrameSetup))); 8549 8550 MI.eraseFromParent(); 8551 return MBB; 8552 } 8553 8554 MachineBasicBlock * 8555 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI, 8556 MachineBasicBlock *MBB) const { 8557 DebugLoc DL = MI.getDebugLoc(); 8558 MachineFunction *MF = MBB->getParent(); 8559 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 8560 8561 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock(); 8562 MF->insert(++MBB->getIterator(), ContBB); 8563 ContBB->splice(ContBB->begin(), MBB, 8564 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 8565 ContBB->transferSuccessorsAndUpdatePHIs(MBB); 8566 8567 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 8568 MF->push_back(TrapBB); 8569 BuildMI(TrapBB, DL, TII->get(ARM::t2UDF)).addImm(249); 8570 MBB->addSuccessor(TrapBB); 8571 8572 BuildMI(*MBB, MI, DL, TII->get(ARM::tCBZ)) 8573 .addReg(MI.getOperand(0).getReg()) 8574 .addMBB(TrapBB); 8575 AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::t2B)).addMBB(ContBB)); 8576 MBB->addSuccessor(ContBB); 8577 8578 MI.eraseFromParent(); 8579 return ContBB; 8580 } 8581 8582 MachineBasicBlock * 8583 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 8584 MachineBasicBlock *BB) const { 8585 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 8586 DebugLoc dl = MI.getDebugLoc(); 8587 bool isThumb2 = Subtarget->isThumb2(); 8588 switch (MI.getOpcode()) { 8589 default: { 8590 MI.dump(); 8591 llvm_unreachable("Unexpected instr type to insert"); 8592 } 8593 8594 // Thumb1 post-indexed loads are really just single-register LDMs. 8595 case ARM::tLDR_postidx: { 8596 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) 8597 .addOperand(MI.getOperand(1)) // Rn_wb 8598 .addOperand(MI.getOperand(2)) // Rn 8599 .addOperand(MI.getOperand(3)) // PredImm 8600 .addOperand(MI.getOperand(4)) // PredReg 8601 .addOperand(MI.getOperand(0)); // Rt 8602 MI.eraseFromParent(); 8603 return BB; 8604 } 8605 8606 // The Thumb2 pre-indexed stores have the same MI operands, they just 8607 // define them differently in the .td files from the isel patterns, so 8608 // they need pseudos. 
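// Expanding them only requires swapping in the real instruction's
// MCInstrDesc via setDesc(); the operands can be reused as-is.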
8609 case ARM::t2STR_preidx: 8610 MI.setDesc(TII->get(ARM::t2STR_PRE)); 8611 return BB; 8612 case ARM::t2STRB_preidx: 8613 MI.setDesc(TII->get(ARM::t2STRB_PRE)); 8614 return BB; 8615 case ARM::t2STRH_preidx: 8616 MI.setDesc(TII->get(ARM::t2STRH_PRE)); 8617 return BB; 8618 8619 case ARM::STRi_preidx: 8620 case ARM::STRBi_preidx: { 8621 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM 8622 : ARM::STRB_PRE_IMM; 8623 // Decode the offset. 8624 unsigned Offset = MI.getOperand(4).getImm(); 8625 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 8626 Offset = ARM_AM::getAM2Offset(Offset); 8627 if (isSub) 8628 Offset = -Offset; 8629 8630 MachineMemOperand *MMO = *MI.memoperands_begin(); 8631 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 8632 .addOperand(MI.getOperand(0)) // Rn_wb 8633 .addOperand(MI.getOperand(1)) // Rt 8634 .addOperand(MI.getOperand(2)) // Rn 8635 .addImm(Offset) // offset (skip GPR==zero_reg) 8636 .addOperand(MI.getOperand(5)) // pred 8637 .addOperand(MI.getOperand(6)) 8638 .addMemOperand(MMO); 8639 MI.eraseFromParent(); 8640 return BB; 8641 } 8642 case ARM::STRr_preidx: 8643 case ARM::STRBr_preidx: 8644 case ARM::STRH_preidx: { 8645 unsigned NewOpc; 8646 switch (MI.getOpcode()) { 8647 default: llvm_unreachable("unexpected opcode!"); 8648 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 8649 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 8650 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 8651 } 8652 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 8653 for (unsigned i = 0; i < MI.getNumOperands(); ++i) 8654 MIB.addOperand(MI.getOperand(i)); 8655 MI.eraseFromParent(); 8656 return BB; 8657 } 8658 8659 case ARM::tMOVCCr_pseudo: { 8660 // To "insert" a SELECT_CC instruction, we actually have to insert the 8661 // diamond control-flow pattern. The incoming instruction knows the 8662 // destination vreg to set, the condition code register to branch on, the 8663 // true/false values to select between, and a branch opcode to use. 8664 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8665 MachineFunction::iterator It = ++BB->getIterator(); 8666 8667 // thisMBB: 8668 // ... 8669 // TrueVal = ... 8670 // cmpTY ccX, r1, r2 8671 // bCC copy1MBB 8672 // fallthrough --> copy0MBB 8673 MachineBasicBlock *thisMBB = BB; 8674 MachineFunction *F = BB->getParent(); 8675 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 8676 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 8677 F->insert(It, copy0MBB); 8678 F->insert(It, sinkMBB); 8679 8680 // Transfer the remainder of BB and its successor edges to sinkMBB. 8681 sinkMBB->splice(sinkMBB->begin(), BB, 8682 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8683 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 8684 8685 BB->addSuccessor(copy0MBB); 8686 BB->addSuccessor(sinkMBB); 8687 8688 BuildMI(BB, dl, TII->get(ARM::tBcc)) 8689 .addMBB(sinkMBB) 8690 .addImm(MI.getOperand(3).getImm()) 8691 .addReg(MI.getOperand(4).getReg()); 8692 8693 // copy0MBB: 8694 // %FalseValue = ... 8695 // # fallthrough to sinkMBB 8696 BB = copy0MBB; 8697 8698 // Update machine-CFG edges 8699 BB->addSuccessor(sinkMBB); 8700 8701 // sinkMBB: 8702 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 8703 // ... 
8704 BB = sinkMBB; 8705 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg()) 8706 .addReg(MI.getOperand(1).getReg()) 8707 .addMBB(copy0MBB) 8708 .addReg(MI.getOperand(2).getReg()) 8709 .addMBB(thisMBB); 8710 8711 MI.eraseFromParent(); // The pseudo instruction is gone now. 8712 return BB; 8713 } 8714 8715 case ARM::BCCi64: 8716 case ARM::BCCZi64: { 8717 // If there is an unconditional branch to the other successor, remove it. 8718 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8719 8720 // Compare both parts that make up the double comparison separately for 8721 // equality. 8722 bool RHSisZero = MI.getOpcode() == ARM::BCCZi64; 8723 8724 unsigned LHS1 = MI.getOperand(1).getReg(); 8725 unsigned LHS2 = MI.getOperand(2).getReg(); 8726 if (RHSisZero) { 8727 AddDefaultPred(BuildMI(BB, dl, 8728 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8729 .addReg(LHS1).addImm(0)); 8730 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8731 .addReg(LHS2).addImm(0) 8732 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 8733 } else { 8734 unsigned RHS1 = MI.getOperand(3).getReg(); 8735 unsigned RHS2 = MI.getOperand(4).getReg(); 8736 AddDefaultPred(BuildMI(BB, dl, 8737 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 8738 .addReg(LHS1).addReg(RHS1)); 8739 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 8740 .addReg(LHS2).addReg(RHS2) 8741 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 8742 } 8743 8744 MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB(); 8745 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 8746 if (MI.getOperand(0).getImm() == ARMCC::NE) 8747 std::swap(destMBB, exitMBB); 8748 8749 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 8750 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 8751 if (isThumb2) 8752 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 8753 else 8754 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 8755 8756 MI.eraseFromParent(); // The pseudo instruction is gone now. 8757 return BB; 8758 } 8759 8760 case ARM::Int_eh_sjlj_setjmp: 8761 case ARM::Int_eh_sjlj_setjmp_nofp: 8762 case ARM::tInt_eh_sjlj_setjmp: 8763 case ARM::t2Int_eh_sjlj_setjmp: 8764 case ARM::t2Int_eh_sjlj_setjmp_nofp: 8765 return BB; 8766 8767 case ARM::Int_eh_sjlj_setup_dispatch: 8768 EmitSjLjDispatchBlock(MI, BB); 8769 return BB; 8770 8771 case ARM::ABS: 8772 case ARM::t2ABS: { 8773 // To insert an ABS instruction, we have to insert the 8774 // diamond control-flow pattern. The incoming instruction knows the 8775 // source vreg to test against 0, the destination vreg to set, 8776 // the condition code register to branch on, the 8777 // true/false values to select between, and a branch opcode to use. 
8778 // It transforms 8779 // V1 = ABS V0 8780 // into 8781 // V2 = MOVS V0 8782 // BCC (branch to SinkBB if V0 >= 0) 8783 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 8784 // SinkBB: V1 = PHI(V2, V3) 8785 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 8786 MachineFunction::iterator BBI = ++BB->getIterator(); 8787 MachineFunction *Fn = BB->getParent(); 8788 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 8789 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 8790 Fn->insert(BBI, RSBBB); 8791 Fn->insert(BBI, SinkBB); 8792 8793 unsigned int ABSSrcReg = MI.getOperand(1).getReg(); 8794 unsigned int ABSDstReg = MI.getOperand(0).getReg(); 8795 bool ABSSrcKIll = MI.getOperand(1).isKill(); 8796 bool isThumb2 = Subtarget->isThumb2(); 8797 MachineRegisterInfo &MRI = Fn->getRegInfo(); 8798 // In Thumb mode S must not be specified if source register is the SP or 8799 // PC and if destination register is the SP, so restrict register class 8800 unsigned NewRsbDstReg = 8801 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass); 8802 8803 // Transfer the remainder of BB and its successor edges to sinkMBB. 8804 SinkBB->splice(SinkBB->begin(), BB, 8805 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 8806 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 8807 8808 BB->addSuccessor(RSBBB); 8809 BB->addSuccessor(SinkBB); 8810 8811 // fall through to SinkMBB 8812 RSBBB->addSuccessor(SinkBB); 8813 8814 // insert a cmp at the end of BB 8815 AddDefaultPred(BuildMI(BB, dl, 8816 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 8817 .addReg(ABSSrcReg).addImm(0)); 8818 8819 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 8820 BuildMI(BB, dl, 8821 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 8822 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 8823 8824 // insert rsbri in RSBBB 8825 // Note: BCC and rsbri will be converted into predicated rsbmi 8826 // by if-conversion pass 8827 BuildMI(*RSBBB, RSBBB->begin(), dl, 8828 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 8829 .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0) 8830 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 8831 8832 // insert PHI in SinkBB, 8833 // reuse ABSDstReg to not change uses of ABS instruction 8834 BuildMI(*SinkBB, SinkBB->begin(), dl, 8835 TII->get(ARM::PHI), ABSDstReg) 8836 .addReg(NewRsbDstReg).addMBB(RSBBB) 8837 .addReg(ABSSrcReg).addMBB(BB); 8838 8839 // remove ABS instruction 8840 MI.eraseFromParent(); 8841 8842 // return last added BB 8843 return SinkBB; 8844 } 8845 case ARM::COPY_STRUCT_BYVAL_I32: 8846 ++NumLoopByVals; 8847 return EmitStructByval(MI, BB); 8848 case ARM::WIN__CHKSTK: 8849 return EmitLowered__chkstk(MI, BB); 8850 case ARM::WIN__DBZCHK: 8851 return EmitLowered__dbzchk(MI, BB); 8852 } 8853 } 8854 8855 /// \brief Attaches vregs to MEMCPY that it will use as scratch registers 8856 /// when it is expanded into LDM/STM. This is done as a post-isel lowering 8857 /// instead of as a custom inserter because we need the use list from the SDNode. 8858 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, 8859 MachineInstr &MI, const SDNode *Node) { 8860 bool isThumb1 = Subtarget->isThumb1Only(); 8861 8862 DebugLoc DL = MI.getDebugLoc(); 8863 MachineFunction *MF = MI.getParent()->getParent(); 8864 MachineRegisterInfo &MRI = MF->getRegInfo(); 8865 MachineInstrBuilder MIB(*MF, MI); 8866 8867 // If the new dst/src is unused mark it as dead. 
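  // (Values 0 and 1 of the MEMCPY pseudo are the updated destination and
  // source pointers, in that order.)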
8868 if (!Node->hasAnyUseOfValue(0)) { 8869 MI.getOperand(0).setIsDead(true); 8870 } 8871 if (!Node->hasAnyUseOfValue(1)) { 8872 MI.getOperand(1).setIsDead(true); 8873 } 8874 8875 // The MEMCPY both defines and kills the scratch registers. 8876 for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { 8877 unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass 8878 : &ARM::GPRRegClass); 8879 MIB.addReg(TmpReg, RegState::Define|RegState::Dead); 8880 } 8881 } 8882 8883 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, 8884 SDNode *Node) const { 8885 if (MI.getOpcode() == ARM::MEMCPY) { 8886 attachMEMCPYScratchRegs(Subtarget, MI, Node); 8887 return; 8888 } 8889 8890 const MCInstrDesc *MCID = &MI.getDesc(); 8891 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 8892 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 8893 // operand is still set to noreg. If needed, set the optional operand's 8894 // register to CPSR, and remove the redundant implicit def. 8895 // 8896 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 8897 8898 // Rename pseudo opcodes. 8899 unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()); 8900 if (NewOpc) { 8901 const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo(); 8902 MCID = &TII->get(NewOpc); 8903 8904 assert(MCID->getNumOperands() == MI.getDesc().getNumOperands() + 1 && 8905 "converted opcode should be the same except for cc_out"); 8906 8907 MI.setDesc(*MCID); 8908 8909 // Add the optional cc_out operand 8910 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 8911 } 8912 unsigned ccOutIdx = MCID->getNumOperands() - 1; 8913 8914 // Any ARM instruction that sets the 's' bit should specify an optional 8915 // "cc_out" operand in the last operand position. 8916 if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 8917 assert(!NewOpc && "Optional cc_out operand required"); 8918 return; 8919 } 8920 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 8921 // since we already have an optional CPSR def. 8922 bool definesCPSR = false; 8923 bool deadCPSR = false; 8924 for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e; 8925 ++i) { 8926 const MachineOperand &MO = MI.getOperand(i); 8927 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 8928 definesCPSR = true; 8929 if (MO.isDead()) 8930 deadCPSR = true; 8931 MI.RemoveOperand(i); 8932 break; 8933 } 8934 } 8935 if (!definesCPSR) { 8936 assert(!NewOpc && "Optional cc_out operand required"); 8937 return; 8938 } 8939 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 8940 if (deadCPSR) { 8941 assert(!MI.getOperand(ccOutIdx).getReg() && 8942 "expect uninitialized optional cc_out operand"); 8943 return; 8944 } 8945 8946 // If this instruction was defined with an optional CPSR def and its dag node 8947 // had a live implicit CPSR def, then activate the optional CPSR def. 8948 MachineOperand &MO = MI.getOperand(ccOutIdx); 8949 MO.setReg(ARM::CPSR); 8950 MO.setIsDef(true); 8951 } 8952 8953 //===----------------------------------------------------------------------===// 8954 // ARM Optimization Hooks 8955 //===----------------------------------------------------------------------===// 8956 8957 // Helper function that checks if N is a null or all ones constant. 8958 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { 8959 return AllOnes ? 
isAllOnesConstant(N) : isNullConstant(N); 8960 } 8961 8962 // Return true if N is conditionally 0 or all ones. 8963 // Detects these expressions where cc is an i1 value: 8964 // 8965 // (select cc 0, y) [AllOnes=0] 8966 // (select cc y, 0) [AllOnes=0] 8967 // (zext cc) [AllOnes=0] 8968 // (sext cc) [AllOnes=0/1] 8969 // (select cc -1, y) [AllOnes=1] 8970 // (select cc y, -1) [AllOnes=1] 8971 // 8972 // Invert is set when N is the null/all ones constant when CC is false. 8973 // OtherOp is set to the alternative value of N. 8974 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 8975 SDValue &CC, bool &Invert, 8976 SDValue &OtherOp, 8977 SelectionDAG &DAG) { 8978 switch (N->getOpcode()) { 8979 default: return false; 8980 case ISD::SELECT: { 8981 CC = N->getOperand(0); 8982 SDValue N1 = N->getOperand(1); 8983 SDValue N2 = N->getOperand(2); 8984 if (isZeroOrAllOnes(N1, AllOnes)) { 8985 Invert = false; 8986 OtherOp = N2; 8987 return true; 8988 } 8989 if (isZeroOrAllOnes(N2, AllOnes)) { 8990 Invert = true; 8991 OtherOp = N1; 8992 return true; 8993 } 8994 return false; 8995 } 8996 case ISD::ZERO_EXTEND: 8997 // (zext cc) can never be the all ones value. 8998 if (AllOnes) 8999 return false; 9000 LLVM_FALLTHROUGH; 9001 case ISD::SIGN_EXTEND: { 9002 SDLoc dl(N); 9003 EVT VT = N->getValueType(0); 9004 CC = N->getOperand(0); 9005 if (CC.getValueType() != MVT::i1) 9006 return false; 9007 Invert = !AllOnes; 9008 if (AllOnes) 9009 // When looking for an AllOnes constant, N is an sext, and the 'other' 9010 // value is 0. 9011 OtherOp = DAG.getConstant(0, dl, VT); 9012 else if (N->getOpcode() == ISD::ZERO_EXTEND) 9013 // When looking for a 0 constant, N can be zext or sext. 9014 OtherOp = DAG.getConstant(1, dl, VT); 9015 else 9016 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl, 9017 VT); 9018 return true; 9019 } 9020 } 9021 } 9022 9023 // Combine a constant select operand into its use: 9024 // 9025 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 9026 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 9027 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 9028 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 9029 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 9030 // 9031 // The transform is rejected if the select doesn't have a constant operand that 9032 // is null, or all ones when AllOnes is set. 9033 // 9034 // Also recognize sext/zext from i1: 9035 // 9036 // (add (zext cc), x) -> (select cc (add x, 1), x) 9037 // (add (sext cc), x) -> (select cc (add x, -1), x) 9038 // 9039 // These transformations eventually create predicated instructions. 9040 // 9041 // @param N The node to transform. 9042 // @param Slct The N operand that is a select. 9043 // @param OtherOp The other N operand (x above). 9044 // @param DCI Context. 9045 // @param AllOnes Require the select constant to be all ones instead of null. 9046 // @returns The new node, or SDValue() on failure. 9047 static 9048 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 9049 TargetLowering::DAGCombinerInfo &DCI, 9050 bool AllOnes = false) { 9051 SelectionDAG &DAG = DCI.DAG; 9052 EVT VT = N->getValueType(0); 9053 SDValue NonConstantVal; 9054 SDValue CCOp; 9055 bool SwapSelectOps; 9056 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 9057 NonConstantVal, DAG)) 9058 return SDValue(); 9059 9060 // Slct is now know to be the desired identity constant when CC is true. 
9061   SDValue TrueVal = OtherOp;
9062   SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
9063                                  OtherOp, NonConstantVal);
9064   // Unless SwapSelectOps says CC should be false.
9065   if (SwapSelectOps)
9066     std::swap(TrueVal, FalseVal);
9067
9068   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
9069                      CCOp, TrueVal, FalseVal);
9070 }
9071
9072 // Attempt combineSelectAndUse on each operand of a commutative operator N.
9073 static
9074 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
9075                                        TargetLowering::DAGCombinerInfo &DCI) {
9076   SDValue N0 = N->getOperand(0);
9077   SDValue N1 = N->getOperand(1);
9078   if (N0.getNode()->hasOneUse())
9079     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
9080       return Result;
9081   if (N1.getNode()->hasOneUse())
9082     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
9083       return Result;
9084   return SDValue();
9085 }
9086
9087 // AddCombineToVPADDL - For pair-wise add on NEON, use the vpaddl instruction
9088 // (only after legalization).
9089 static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9090                                   TargetLowering::DAGCombinerInfo &DCI,
9091                                   const ARMSubtarget *Subtarget) {
9092
9093   // Only perform optimization if after legalize, and if NEON is available. We
9094   // also expect both operands to be BUILD_VECTORs.
9095   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
9096       || N0.getOpcode() != ISD::BUILD_VECTOR
9097       || N1.getOpcode() != ISD::BUILD_VECTOR)
9098     return SDValue();
9099
9100   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
9101   EVT VT = N->getValueType(0);
9102   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
9103     return SDValue();
9104
9105   // Check that the vector operands are of the right form.
9106   // N0 and N1 are BUILD_VECTOR nodes, each with N EXTRACT_VECTOR_ELT
9107   // operands, where N is the number of elements in the formed vector.
9108   // Each EXTRACT_VECTOR_ELT should read the same input vector at an even or
9109   // odd index, so that together they form a pair-wise add pattern.
9110
9111   // Grab the vector that all EXTRACT_VECTOR_ELT nodes should be referencing.
9112   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9113     return SDValue();
9114   SDValue Vec = N0->getOperand(0)->getOperand(0);
9115   SDNode *V = Vec.getNode();
9116   unsigned nextIndex = 0;
9117
9118   // For each operand of the ADD that is a BUILD_VECTOR,
9119   // check that each of its operands is an EXTRACT_VECTOR_ELT with
9120   // the same vector and the appropriate index.
9121   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
9122     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
9123         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
9124
9125       SDValue ExtVec0 = N0->getOperand(i);
9126       SDValue ExtVec1 = N1->getOperand(i);
9127
9128       // First operand is the vector; verify it's the same one.
9129       if (V != ExtVec0->getOperand(0).getNode() ||
9130           V != ExtVec1->getOperand(0).getNode())
9131         return SDValue();
9132
9133       // Second operand is the lane index constant; verify it's correct.
9134       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
9135       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
9136
9137       // For the constants, we want all the even lanes (N0) or all the odd (N1).
9138       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
9139           || C1->getZExtValue() != nextIndex+1)
9140         return SDValue();
9141
9142       // Increment index.
9143       nextIndex += 2;
9144     } else
9145       return SDValue();
9146   }
9147
9148   // Create VPADDL node.
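  // Illustrative shape of the match (lane indices assumed):
  //   N0 = build_vector Vec[0], Vec[2], ...
  //   N1 = build_vector Vec[1], Vec[3], ...
  // so (add N0, N1) is a pairwise add of adjacent lanes of Vec. Below it is
  // re-emitted as an arm.neon.vpaddls intrinsic on Vec, whose result lanes are
  // one step wider (e.g. i8 -> i16), then truncated/extended to the add's type.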
9149   SelectionDAG &DAG = DCI.DAG;
9150   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9151
9152   SDLoc dl(N);
9153
9154   // Build operand list.
9155   SmallVector<SDValue, 8> Ops;
9156   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
9157                                 TLI.getPointerTy(DAG.getDataLayout())));
9158
9159   // Input is the vector.
9160   Ops.push_back(Vec);
9161
9162   // Get widened type and narrowed type.
9163   MVT widenType;
9164   unsigned numElem = VT.getVectorNumElements();
9165
9166   EVT inputLaneType = Vec.getValueType().getVectorElementType();
9167   switch (inputLaneType.getSimpleVT().SimpleTy) {
9168   case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
9169   case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
9170   case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
9171   default:
9172     llvm_unreachable("Invalid vector element type for padd optimization.");
9173   }
9174
9175   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
9176   unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
9177   return DAG.getNode(ExtOp, dl, VT, tmp);
9178 }
9179
9180 static SDValue findMUL_LOHI(SDValue V) {
9181   if (V->getOpcode() == ISD::UMUL_LOHI ||
9182       V->getOpcode() == ISD::SMUL_LOHI)
9183     return V;
9184   return SDValue();
9185 }
9186
9187 static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
9188                                      TargetLowering::DAGCombinerInfo &DCI,
9189                                      const ARMSubtarget *Subtarget) {
9190
9191   // Look for multiply add opportunities.
9192   // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
9193   // each add node consumes a value from ISD::UMUL_LOHI and there is
9194   // a glue link from the first add to the second add.
9195   // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
9196   // an S/UMLAL instruction.
9197   //                  UMUL_LOHI
9198   //                 / :lo    \ :hi
9199   //                /          \          [no multiline comment]
9200   //    loAdd ->  ADDC          |
9201   //                 \ :glue   /
9202   //                  \       /
9203   //                   ADDE <- hiAdd
9204   //
9205   assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
9206   SDValue AddcOp0 = AddcNode->getOperand(0);
9207   SDValue AddcOp1 = AddcNode->getOperand(1);
9208
9209   // Check if the two operands are from the same mul_lohi node.
9210   if (AddcOp0.getNode() == AddcOp1.getNode())
9211     return SDValue();
9212
9213   assert(AddcNode->getNumValues() == 2 &&
9214          AddcNode->getValueType(0) == MVT::i32 &&
9215          "Expect ADDC with two result values. First: i32");
9216
9217   // Check that we have a glued ADDC node.
9218   if (AddcNode->getValueType(1) != MVT::Glue)
9219     return SDValue();
9220
9221   // Check that the ADDC adds the low result of the S/UMUL_LOHI.
9222   if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
9223       AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
9224       AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
9225       AddcOp1->getOpcode() != ISD::SMUL_LOHI)
9226     return SDValue();
9227
9228   // Look for the glued ADDE.
9229   SDNode* AddeNode = AddcNode->getGluedUser();
9230   if (!AddeNode)
9231     return SDValue();
9232
9233   // Make sure it is really an ADDE.
9234   if (AddeNode->getOpcode() != ISD::ADDE)
9235     return SDValue();
9236
9237   assert(AddeNode->getNumOperands() == 3 &&
9238          AddeNode->getOperand(2).getValueType() == MVT::Glue &&
9239          "ADDE node has the wrong inputs");
9240
9241   // Check for the triangle shape.
9242   SDValue AddeOp0 = AddeNode->getOperand(0);
9243   SDValue AddeOp1 = AddeNode->getOperand(1);
9244
9245   // Make sure that the ADDE operands are not coming from the same node.
9246 if (AddeOp0.getNode() == AddeOp1.getNode()) 9247 return SDValue(); 9248 9249 // Find the MUL_LOHI node walking up ADDE's operands. 9250 bool IsLeftOperandMUL = false; 9251 SDValue MULOp = findMUL_LOHI(AddeOp0); 9252 if (MULOp == SDValue()) 9253 MULOp = findMUL_LOHI(AddeOp1); 9254 else 9255 IsLeftOperandMUL = true; 9256 if (MULOp == SDValue()) 9257 return SDValue(); 9258 9259 // Figure out the right opcode. 9260 unsigned Opc = MULOp->getOpcode(); 9261 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; 9262 9263 // Figure out the high and low input values to the MLAL node. 9264 SDValue* HiAdd = nullptr; 9265 SDValue* LoMul = nullptr; 9266 SDValue* LowAdd = nullptr; 9267 9268 // Ensure that ADDE is from high result of ISD::SMUL_LOHI. 9269 if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1))) 9270 return SDValue(); 9271 9272 if (IsLeftOperandMUL) 9273 HiAdd = &AddeOp1; 9274 else 9275 HiAdd = &AddeOp0; 9276 9277 9278 // Ensure that LoMul and LowAdd are taken from correct ISD::SMUL_LOHI node 9279 // whose low result is fed to the ADDC we are checking. 9280 9281 if (AddcOp0 == MULOp.getValue(0)) { 9282 LoMul = &AddcOp0; 9283 LowAdd = &AddcOp1; 9284 } 9285 if (AddcOp1 == MULOp.getValue(0)) { 9286 LoMul = &AddcOp1; 9287 LowAdd = &AddcOp0; 9288 } 9289 9290 if (!LoMul) 9291 return SDValue(); 9292 9293 // Create the merged node. 9294 SelectionDAG &DAG = DCI.DAG; 9295 9296 // Build operand list. 9297 SmallVector<SDValue, 8> Ops; 9298 Ops.push_back(LoMul->getOperand(0)); 9299 Ops.push_back(LoMul->getOperand(1)); 9300 Ops.push_back(*LowAdd); 9301 Ops.push_back(*HiAdd); 9302 9303 SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode), 9304 DAG.getVTList(MVT::i32, MVT::i32), Ops); 9305 9306 // Replace the ADDs' nodes uses by the MLA node's values. 9307 SDValue HiMLALResult(MLALNode.getNode(), 1); 9308 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); 9309 9310 SDValue LoMLALResult(MLALNode.getNode(), 0); 9311 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); 9312 9313 // Return original node to notify the driver to stop replacing. 9314 SDValue resNode(AddcNode, 0); 9315 return resNode; 9316 } 9317 9318 static SDValue AddCombineTo64bitUMAAL(SDNode *AddcNode, 9319 TargetLowering::DAGCombinerInfo &DCI, 9320 const ARMSubtarget *Subtarget) { 9321 // UMAAL is similar to UMLAL except that it adds two unsigned values. 9322 // While trying to combine for the other MLAL nodes, first search for the 9323 // chance to use UMAAL. Check if Addc uses another addc node which can first 9324 // be combined into a UMLAL. The other pattern is AddcNode being combined 9325 // into an UMLAL and then using another addc is handled in ISelDAGToDAG. 9326 9327 if (!Subtarget->hasV6Ops() || 9328 (Subtarget->isThumb() && !Subtarget->hasThumb2())) 9329 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget); 9330 9331 SDNode *PrevAddc = nullptr; 9332 if (AddcNode->getOperand(0).getOpcode() == ISD::ADDC) 9333 PrevAddc = AddcNode->getOperand(0).getNode(); 9334 else if (AddcNode->getOperand(1).getOpcode() == ISD::ADDC) 9335 PrevAddc = AddcNode->getOperand(1).getNode(); 9336 9337 // If there's no addc chains, just return a search for any MLAL. 9338 if (PrevAddc == nullptr) 9339 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget); 9340 9341 // Try to convert the addc operand to an MLAL and if that fails try to 9342 // combine AddcNode. 
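  // (For reference: UMAAL RdLo, RdHi, Rn, Rm computes RdHi:RdLo =
  // Rn * Rm + RdLo + RdHi, i.e. a 32x32->64 multiply plus two independent
  // 32-bit addends, which is why two chained adds can be folded into it.)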
9343 SDValue MLAL = AddCombineTo64bitMLAL(PrevAddc, DCI, Subtarget); 9344 if (MLAL != SDValue(PrevAddc, 0)) 9345 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget); 9346 9347 // Find the converted UMAAL or quit if it doesn't exist. 9348 SDNode *UmlalNode = nullptr; 9349 SDValue AddHi; 9350 if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) { 9351 UmlalNode = AddcNode->getOperand(0).getNode(); 9352 AddHi = AddcNode->getOperand(1); 9353 } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) { 9354 UmlalNode = AddcNode->getOperand(1).getNode(); 9355 AddHi = AddcNode->getOperand(0); 9356 } else { 9357 return SDValue(); 9358 } 9359 9360 // The ADDC should be glued to an ADDE node, which uses the same UMLAL as 9361 // the ADDC as well as Zero. 9362 auto *Zero = dyn_cast<ConstantSDNode>(UmlalNode->getOperand(3)); 9363 9364 if (!Zero || Zero->getZExtValue() != 0) 9365 return SDValue(); 9366 9367 // Check that we have a glued ADDC node. 9368 if (AddcNode->getValueType(1) != MVT::Glue) 9369 return SDValue(); 9370 9371 // Look for the glued ADDE. 9372 SDNode* AddeNode = AddcNode->getGluedUser(); 9373 if (!AddeNode) 9374 return SDValue(); 9375 9376 if ((AddeNode->getOperand(0).getNode() == Zero && 9377 AddeNode->getOperand(1).getNode() == UmlalNode) || 9378 (AddeNode->getOperand(0).getNode() == UmlalNode && 9379 AddeNode->getOperand(1).getNode() == Zero)) { 9380 9381 SelectionDAG &DAG = DCI.DAG; 9382 SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1), 9383 UmlalNode->getOperand(2), AddHi }; 9384 SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode), 9385 DAG.getVTList(MVT::i32, MVT::i32), Ops); 9386 9387 // Replace the ADDs' nodes uses by the UMAAL node's values. 9388 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1)); 9389 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0)); 9390 9391 // Return original node to notify the driver to stop replacing. 9392 return SDValue(AddcNode, 0); 9393 } 9394 return SDValue(); 9395 } 9396 9397 /// PerformADDCCombine - Target-specific dag combine transform from 9398 /// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL or 9399 /// ISD::ADDC, ISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL 9400 static SDValue PerformADDCCombine(SDNode *N, 9401 TargetLowering::DAGCombinerInfo &DCI, 9402 const ARMSubtarget *Subtarget) { 9403 9404 if (Subtarget->isThumb1Only()) return SDValue(); 9405 9406 // Only perform the checks after legalize when the pattern is available. 9407 if (DCI.isBeforeLegalize()) return SDValue(); 9408 9409 return AddCombineTo64bitUMAAL(N, DCI, Subtarget); 9410 } 9411 9412 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 9413 /// operands N0 and N1. This is a helper for PerformADDCombine that is 9414 /// called with the default operands, and if that fails, with commuted 9415 /// operands. 9416 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 9417 TargetLowering::DAGCombinerInfo &DCI, 9418 const ARMSubtarget *Subtarget){ 9419 9420 // Attempt to create vpaddl for this add. 9421 if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget)) 9422 return Result; 9423 9424 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 9425 if (N0.getNode()->hasOneUse()) 9426 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI)) 9427 return Result; 9428 return SDValue(); 9429 } 9430 9431 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 
9432 /// 9433 static SDValue PerformADDCombine(SDNode *N, 9434 TargetLowering::DAGCombinerInfo &DCI, 9435 const ARMSubtarget *Subtarget) { 9436 SDValue N0 = N->getOperand(0); 9437 SDValue N1 = N->getOperand(1); 9438 9439 // First try with the default operand order. 9440 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget)) 9441 return Result; 9442 9443 // If that didn't work, try again with the operands commuted. 9444 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 9445 } 9446 9447 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 9448 /// 9449 static SDValue PerformSUBCombine(SDNode *N, 9450 TargetLowering::DAGCombinerInfo &DCI) { 9451 SDValue N0 = N->getOperand(0); 9452 SDValue N1 = N->getOperand(1); 9453 9454 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 9455 if (N1.getNode()->hasOneUse()) 9456 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI)) 9457 return Result; 9458 9459 return SDValue(); 9460 } 9461 9462 /// PerformVMULCombine 9463 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 9464 /// special multiplier accumulator forwarding. 9465 /// vmul d3, d0, d2 9466 /// vmla d3, d1, d2 9467 /// is faster than 9468 /// vadd d3, d0, d1 9469 /// vmul d3, d3, d2 9470 // However, for (A + B) * (A + B), 9471 // vadd d2, d0, d1 9472 // vmul d3, d0, d2 9473 // vmla d3, d1, d2 9474 // is slower than 9475 // vadd d2, d0, d1 9476 // vmul d3, d2, d2 9477 static SDValue PerformVMULCombine(SDNode *N, 9478 TargetLowering::DAGCombinerInfo &DCI, 9479 const ARMSubtarget *Subtarget) { 9480 if (!Subtarget->hasVMLxForwarding()) 9481 return SDValue(); 9482 9483 SelectionDAG &DAG = DCI.DAG; 9484 SDValue N0 = N->getOperand(0); 9485 SDValue N1 = N->getOperand(1); 9486 unsigned Opcode = N0.getOpcode(); 9487 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 9488 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 9489 Opcode = N1.getOpcode(); 9490 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 9491 Opcode != ISD::FADD && Opcode != ISD::FSUB) 9492 return SDValue(); 9493 std::swap(N0, N1); 9494 } 9495 9496 if (N0 == N1) 9497 return SDValue(); 9498 9499 EVT VT = N->getValueType(0); 9500 SDLoc DL(N); 9501 SDValue N00 = N0->getOperand(0); 9502 SDValue N01 = N0->getOperand(1); 9503 return DAG.getNode(Opcode, DL, VT, 9504 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 9505 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 9506 } 9507 9508 static SDValue PerformMULCombine(SDNode *N, 9509 TargetLowering::DAGCombinerInfo &DCI, 9510 const ARMSubtarget *Subtarget) { 9511 SelectionDAG &DAG = DCI.DAG; 9512 9513 if (Subtarget->isThumb1Only()) 9514 return SDValue(); 9515 9516 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 9517 return SDValue(); 9518 9519 EVT VT = N->getValueType(0); 9520 if (VT.is64BitVector() || VT.is128BitVector()) 9521 return PerformVMULCombine(N, DCI, Subtarget); 9522 if (VT != MVT::i32) 9523 return SDValue(); 9524 9525 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 9526 if (!C) 9527 return SDValue(); 9528 9529 int64_t MulAmt = C->getSExtValue(); 9530 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt); 9531 9532 ShiftAmt = ShiftAmt & (32 - 1); 9533 SDValue V = N->getOperand(0); 9534 SDLoc DL(N); 9535 9536 SDValue Res; 9537 MulAmt >>= ShiftAmt; 9538 9539 if (MulAmt >= 0) { 9540 if (isPowerOf2_32(MulAmt - 1)) { 9541 // (mul x, 2^N + 1) => (add (shl x, N), x) 9542 Res = DAG.getNode(ISD::ADD, DL, VT, 9543 V, 9544 DAG.getNode(ISD::SHL, DL, VT, 9545 V, 9546 DAG.getConstant(Log2_32(MulAmt - 
1), DL, 9547 MVT::i32))); 9548 } else if (isPowerOf2_32(MulAmt + 1)) { 9549 // (mul x, 2^N - 1) => (sub (shl x, N), x) 9550 Res = DAG.getNode(ISD::SUB, DL, VT, 9551 DAG.getNode(ISD::SHL, DL, VT, 9552 V, 9553 DAG.getConstant(Log2_32(MulAmt + 1), DL, 9554 MVT::i32)), 9555 V); 9556 } else 9557 return SDValue(); 9558 } else { 9559 uint64_t MulAmtAbs = -MulAmt; 9560 if (isPowerOf2_32(MulAmtAbs + 1)) { 9561 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 9562 Res = DAG.getNode(ISD::SUB, DL, VT, 9563 V, 9564 DAG.getNode(ISD::SHL, DL, VT, 9565 V, 9566 DAG.getConstant(Log2_32(MulAmtAbs + 1), DL, 9567 MVT::i32))); 9568 } else if (isPowerOf2_32(MulAmtAbs - 1)) { 9569 // (mul x, -(2^N + 1)) => - (add (shl x, N), x) 9570 Res = DAG.getNode(ISD::ADD, DL, VT, 9571 V, 9572 DAG.getNode(ISD::SHL, DL, VT, 9573 V, 9574 DAG.getConstant(Log2_32(MulAmtAbs - 1), DL, 9575 MVT::i32))); 9576 Res = DAG.getNode(ISD::SUB, DL, VT, 9577 DAG.getConstant(0, DL, MVT::i32), Res); 9578 9579 } else 9580 return SDValue(); 9581 } 9582 9583 if (ShiftAmt != 0) 9584 Res = DAG.getNode(ISD::SHL, DL, VT, 9585 Res, DAG.getConstant(ShiftAmt, DL, MVT::i32)); 9586 9587 // Do not add new nodes to DAG combiner worklist. 9588 DCI.CombineTo(N, Res, false); 9589 return SDValue(); 9590 } 9591 9592 static SDValue PerformANDCombine(SDNode *N, 9593 TargetLowering::DAGCombinerInfo &DCI, 9594 const ARMSubtarget *Subtarget) { 9595 9596 // Attempt to use immediate-form VBIC 9597 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 9598 SDLoc dl(N); 9599 EVT VT = N->getValueType(0); 9600 SelectionDAG &DAG = DCI.DAG; 9601 9602 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9603 return SDValue(); 9604 9605 APInt SplatBits, SplatUndef; 9606 unsigned SplatBitSize; 9607 bool HasAnyUndefs; 9608 if (BVN && 9609 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 9610 if (SplatBitSize <= 64) { 9611 EVT VbicVT; 9612 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 9613 SplatUndef.getZExtValue(), SplatBitSize, 9614 DAG, dl, VbicVT, VT.is128BitVector(), 9615 OtherModImm); 9616 if (Val.getNode()) { 9617 SDValue Input = 9618 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 9619 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 9620 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 9621 } 9622 } 9623 } 9624 9625 if (!Subtarget->isThumb1Only()) { 9626 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) 9627 if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI)) 9628 return Result; 9629 } 9630 9631 return SDValue(); 9632 } 9633 9634 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR 9635 static SDValue PerformORCombine(SDNode *N, 9636 TargetLowering::DAGCombinerInfo &DCI, 9637 const ARMSubtarget *Subtarget) { 9638 // Attempt to use immediate-form VORR 9639 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 9640 SDLoc dl(N); 9641 EVT VT = N->getValueType(0); 9642 SelectionDAG &DAG = DCI.DAG; 9643 9644 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9645 return SDValue(); 9646 9647 APInt SplatBits, SplatUndef; 9648 unsigned SplatBitSize; 9649 bool HasAnyUndefs; 9650 if (BVN && Subtarget->hasNEON() && 9651 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 9652 if (SplatBitSize <= 64) { 9653 EVT VorrVT; 9654 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 9655 SplatUndef.getZExtValue(), SplatBitSize, 9656 DAG, dl, VorrVT, VT.is128BitVector(), 9657 OtherModImm); 9658 if (Val.getNode()) { 9659 
SDValue Input = 9660 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 9661 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 9662 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 9663 } 9664 } 9665 } 9666 9667 if (!Subtarget->isThumb1Only()) { 9668 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 9669 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 9670 return Result; 9671 } 9672 9673 // The code below optimizes (or (and X, Y), Z). 9674 // The AND operand needs to have a single user to make these optimizations 9675 // profitable. 9676 SDValue N0 = N->getOperand(0); 9677 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 9678 return SDValue(); 9679 SDValue N1 = N->getOperand(1); 9680 9681 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 9682 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 9683 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 9684 APInt SplatUndef; 9685 unsigned SplatBitSize; 9686 bool HasAnyUndefs; 9687 9688 APInt SplatBits0, SplatBits1; 9689 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 9690 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 9691 // Ensure that the second operand of both ands are constants 9692 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 9693 HasAnyUndefs) && !HasAnyUndefs) { 9694 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 9695 HasAnyUndefs) && !HasAnyUndefs) { 9696 // Ensure that the bit width of the constants are the same and that 9697 // the splat arguments are logical inverses as per the pattern we 9698 // are trying to simplify. 9699 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() && 9700 SplatBits0 == ~SplatBits1) { 9701 // Canonicalize the vector type to make instruction selection 9702 // simpler. 9703 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 9704 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 9705 N0->getOperand(1), 9706 N0->getOperand(0), 9707 N1->getOperand(0)); 9708 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 9709 } 9710 } 9711 } 9712 } 9713 9714 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 9715 // reasonable. 9716 9717 // BFI is only available on V6T2+ 9718 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 9719 return SDValue(); 9720 9721 SDLoc DL(N); 9722 // 1) or (and A, mask), val => ARMbfi A, val, mask 9723 // iff (val & mask) == val 9724 // 9725 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 9726 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 9727 // && mask == ~mask2 9728 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 9729 // && ~mask == mask2 9730 // (i.e., copy a bitfield value into another bitfield of the same width) 9731 9732 if (VT != MVT::i32) 9733 return SDValue(); 9734 9735 SDValue N00 = N0.getOperand(0); 9736 9737 // The value and the mask need to be constants so we can verify this is 9738 // actually a bitfield set. If the mask is 0xffff, we can do better 9739 // via a movt instruction, so don't use BFI in that case. 
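  // Illustrative instance of case (1), constants assumed:
  //   (or (and A, 0xffffff00), 0x42)  -->  (ARMbfi A, 0x42, 0xffffff00)
  // i.e. the low byte of A is replaced by 0x42 with a single BFI.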
9740 SDValue MaskOp = N0.getOperand(1); 9741 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 9742 if (!MaskC) 9743 return SDValue(); 9744 unsigned Mask = MaskC->getZExtValue(); 9745 if (Mask == 0xffff) 9746 return SDValue(); 9747 SDValue Res; 9748 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 9749 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 9750 if (N1C) { 9751 unsigned Val = N1C->getZExtValue(); 9752 if ((Val & ~Mask) != Val) 9753 return SDValue(); 9754 9755 if (ARM::isBitFieldInvertedMask(Mask)) { 9756 Val >>= countTrailingZeros(~Mask); 9757 9758 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 9759 DAG.getConstant(Val, DL, MVT::i32), 9760 DAG.getConstant(Mask, DL, MVT::i32)); 9761 9762 // Do not add new nodes to DAG combiner worklist. 9763 DCI.CombineTo(N, Res, false); 9764 return SDValue(); 9765 } 9766 } else if (N1.getOpcode() == ISD::AND) { 9767 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 9768 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 9769 if (!N11C) 9770 return SDValue(); 9771 unsigned Mask2 = N11C->getZExtValue(); 9772 9773 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 9774 // as is to match. 9775 if (ARM::isBitFieldInvertedMask(Mask) && 9776 (Mask == ~Mask2)) { 9777 // The pack halfword instruction works better for masks that fit it, 9778 // so use that when it's available. 9779 if (Subtarget->hasT2ExtractPack() && 9780 (Mask == 0xffff || Mask == 0xffff0000)) 9781 return SDValue(); 9782 // 2a 9783 unsigned amt = countTrailingZeros(Mask2); 9784 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 9785 DAG.getConstant(amt, DL, MVT::i32)); 9786 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 9787 DAG.getConstant(Mask, DL, MVT::i32)); 9788 // Do not add new nodes to DAG combiner worklist. 9789 DCI.CombineTo(N, Res, false); 9790 return SDValue(); 9791 } else if (ARM::isBitFieldInvertedMask(~Mask) && 9792 (~Mask == Mask2)) { 9793 // The pack halfword instruction works better for masks that fit it, 9794 // so use that when it's available. 9795 if (Subtarget->hasT2ExtractPack() && 9796 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 9797 return SDValue(); 9798 // 2b 9799 unsigned lsb = countTrailingZeros(Mask); 9800 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 9801 DAG.getConstant(lsb, DL, MVT::i32)); 9802 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 9803 DAG.getConstant(Mask2, DL, MVT::i32)); 9804 // Do not add new nodes to DAG combiner worklist. 9805 DCI.CombineTo(N, Res, false); 9806 return SDValue(); 9807 } 9808 } 9809 9810 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 9811 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 9812 ARM::isBitFieldInvertedMask(~Mask)) { 9813 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 9814 // where lsb(mask) == #shamt and masked bits of B are known zero. 9815 SDValue ShAmt = N00.getOperand(1); 9816 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 9817 unsigned LSB = countTrailingZeros(Mask); 9818 if (ShAmtC != LSB) 9819 return SDValue(); 9820 9821 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 9822 DAG.getConstant(~Mask, DL, MVT::i32)); 9823 9824 // Do not add new nodes to DAG combiner worklist. 
9825 DCI.CombineTo(N, Res, false); 9826 } 9827 9828 return SDValue(); 9829 } 9830 9831 static SDValue PerformXORCombine(SDNode *N, 9832 TargetLowering::DAGCombinerInfo &DCI, 9833 const ARMSubtarget *Subtarget) { 9834 EVT VT = N->getValueType(0); 9835 SelectionDAG &DAG = DCI.DAG; 9836 9837 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9838 return SDValue(); 9839 9840 if (!Subtarget->isThumb1Only()) { 9841 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 9842 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI)) 9843 return Result; 9844 } 9845 9846 return SDValue(); 9847 } 9848 9849 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it, 9850 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and 9851 // their position in "to" (Rd). 9852 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { 9853 assert(N->getOpcode() == ARMISD::BFI); 9854 9855 SDValue From = N->getOperand(1); 9856 ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue(); 9857 FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation()); 9858 9859 // If the Base came from a SHR #C, we can deduce that it is really testing bit 9860 // #C in the base of the SHR. 9861 if (From->getOpcode() == ISD::SRL && 9862 isa<ConstantSDNode>(From->getOperand(1))) { 9863 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue(); 9864 assert(Shift.getLimitedValue() < 32 && "Shift too large!"); 9865 FromMask <<= Shift.getLimitedValue(31); 9866 From = From->getOperand(0); 9867 } 9868 9869 return From; 9870 } 9871 9872 // If A and B contain one contiguous set of bits, does A | B == A . B? 9873 // 9874 // Neither A nor B must be zero. 9875 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { 9876 unsigned LastActiveBitInA = A.countTrailingZeros(); 9877 unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1; 9878 return LastActiveBitInA - 1 == FirstActiveBitInB; 9879 } 9880 9881 static SDValue FindBFIToCombineWith(SDNode *N) { 9882 // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with, 9883 // if one exists. 9884 APInt ToMask, FromMask; 9885 SDValue From = ParseBFI(N, ToMask, FromMask); 9886 SDValue To = N->getOperand(0); 9887 9888 // Now check for a compatible BFI to merge with. We can pass through BFIs that 9889 // aren't compatible, but not if they set the same bit in their destination as 9890 // we do (or that of any BFI we're going to combine with). 9891 SDValue V = To; 9892 APInt CombinedToMask = ToMask; 9893 while (V.getOpcode() == ARMISD::BFI) { 9894 APInt NewToMask, NewFromMask; 9895 SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask); 9896 if (NewFrom != From) { 9897 // This BFI has a different base. Keep going. 9898 CombinedToMask |= NewToMask; 9899 V = V.getOperand(0); 9900 continue; 9901 } 9902 9903 // Do the written bits conflict with any we've seen so far? 9904 if ((NewToMask & CombinedToMask).getBoolValue()) 9905 // Conflicting bits - bail out because going further is unsafe. 9906 return SDValue(); 9907 9908 // Are the new bits contiguous when combined with the old bits? 9909 if (BitsProperlyConcatenate(ToMask, NewToMask) && 9910 BitsProperlyConcatenate(FromMask, NewFromMask)) 9911 return V; 9912 if (BitsProperlyConcatenate(NewToMask, ToMask) && 9913 BitsProperlyConcatenate(NewFromMask, FromMask)) 9914 return V; 9915 9916 // We've seen a write to some bits, so track it. 
9917 CombinedToMask |= NewToMask; 9918 // Keep going... 9919 V = V.getOperand(0); 9920 } 9921 9922 return SDValue(); 9923 } 9924 9925 static SDValue PerformBFICombine(SDNode *N, 9926 TargetLowering::DAGCombinerInfo &DCI) { 9927 SDValue N1 = N->getOperand(1); 9928 if (N1.getOpcode() == ISD::AND) { 9929 // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 9930 // the bits being cleared by the AND are not demanded by the BFI. 9931 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 9932 if (!N11C) 9933 return SDValue(); 9934 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 9935 unsigned LSB = countTrailingZeros(~InvMask); 9936 unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB; 9937 assert(Width < 9938 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) && 9939 "undefined behavior"); 9940 unsigned Mask = (1u << Width) - 1; 9941 unsigned Mask2 = N11C->getZExtValue(); 9942 if ((Mask & (~Mask2)) == 0) 9943 return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0), 9944 N->getOperand(0), N1.getOperand(0), 9945 N->getOperand(2)); 9946 } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) { 9947 // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes. 9948 // Keep track of any consecutive bits set that all come from the same base 9949 // value. We can combine these together into a single BFI. 9950 SDValue CombineBFI = FindBFIToCombineWith(N); 9951 if (CombineBFI == SDValue()) 9952 return SDValue(); 9953 9954 // We've found a BFI. 9955 APInt ToMask1, FromMask1; 9956 SDValue From1 = ParseBFI(N, ToMask1, FromMask1); 9957 9958 APInt ToMask2, FromMask2; 9959 SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2); 9960 assert(From1 == From2); 9961 (void)From2; 9962 9963 // First, unlink CombineBFI. 9964 DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0)); 9965 // Then create a new BFI, combining the two together. 9966 APInt NewFromMask = FromMask1 | FromMask2; 9967 APInt NewToMask = ToMask1 | ToMask2; 9968 9969 EVT VT = N->getValueType(0); 9970 SDLoc dl(N); 9971 9972 if (NewFromMask[0] == 0) 9973 From1 = DCI.DAG.getNode( 9974 ISD::SRL, dl, VT, From1, 9975 DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT)); 9976 return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1, 9977 DCI.DAG.getConstant(~NewToMask, dl, VT)); 9978 } 9979 return SDValue(); 9980 } 9981 9982 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for 9983 /// ARMISD::VMOVRRD. 9984 static SDValue PerformVMOVRRDCombine(SDNode *N, 9985 TargetLowering::DAGCombinerInfo &DCI, 9986 const ARMSubtarget *Subtarget) { 9987 // vmovrrd(vmovdrr x, y) -> x,y 9988 SDValue InDouble = N->getOperand(0); 9989 if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP()) 9990 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 9991 9992 // vmovrrd(load f64) -> (load i32), (load i32) 9993 SDNode *InNode = InDouble.getNode(); 9994 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 9995 InNode->getValueType(0) == MVT::f64 && 9996 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 9997 !cast<LoadSDNode>(InNode)->isVolatile()) { 9998 // TODO: Should this be done for non-FrameIndex operands? 
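    // The replacement keeps a single dependency chain: the second i32 load is
    // chained on the first and reads at BasePtr + 4, and the two results are
    // swapped on big-endian targets so they still map to the VMOVRRD halves.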
9999 LoadSDNode *LD = cast<LoadSDNode>(InNode); 10000 10001 SelectionDAG &DAG = DCI.DAG; 10002 SDLoc DL(LD); 10003 SDValue BasePtr = LD->getBasePtr(); 10004 SDValue NewLD1 = 10005 DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(), 10006 LD->getAlignment(), LD->getMemOperand()->getFlags()); 10007 10008 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 10009 DAG.getConstant(4, DL, MVT::i32)); 10010 SDValue NewLD2 = DAG.getLoad( 10011 MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(), 10012 std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags()); 10013 10014 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 10015 if (DCI.DAG.getDataLayout().isBigEndian()) 10016 std::swap (NewLD1, NewLD2); 10017 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 10018 return Result; 10019 } 10020 10021 return SDValue(); 10022 } 10023 10024 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for 10025 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 10026 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 10027 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 10028 SDValue Op0 = N->getOperand(0); 10029 SDValue Op1 = N->getOperand(1); 10030 if (Op0.getOpcode() == ISD::BITCAST) 10031 Op0 = Op0.getOperand(0); 10032 if (Op1.getOpcode() == ISD::BITCAST) 10033 Op1 = Op1.getOperand(0); 10034 if (Op0.getOpcode() == ARMISD::VMOVRRD && 10035 Op0.getNode() == Op1.getNode() && 10036 Op0.getResNo() == 0 && Op1.getResNo() == 1) 10037 return DAG.getNode(ISD::BITCAST, SDLoc(N), 10038 N->getValueType(0), Op0.getOperand(0)); 10039 return SDValue(); 10040 } 10041 10042 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 10043 /// are normal, non-volatile loads. If so, it is profitable to bitcast an 10044 /// i64 vector to have f64 elements, since the value can then be loaded 10045 /// directly into a VFP register. 10046 static bool hasNormalLoadOperand(SDNode *N) { 10047 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 10048 for (unsigned i = 0; i < NumElts; ++i) { 10049 SDNode *Elt = N->getOperand(i).getNode(); 10050 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 10051 return true; 10052 } 10053 return false; 10054 } 10055 10056 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 10057 /// ISD::BUILD_VECTOR. 10058 static SDValue PerformBUILD_VECTORCombine(SDNode *N, 10059 TargetLowering::DAGCombinerInfo &DCI, 10060 const ARMSubtarget *Subtarget) { 10061 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 10062 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 10063 // into a pair of GPRs, which is fine when the value is used as a scalar, 10064 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 10065 SelectionDAG &DAG = DCI.DAG; 10066 if (N->getNumOperands() == 2) 10067 if (SDValue RV = PerformVMOVDRRCombine(N, DAG)) 10068 return RV; 10069 10070 // Load i64 elements as f64 values so that type legalization does not split 10071 // them up into i32 values. 
10072 EVT VT = N->getValueType(0); 10073 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 10074 return SDValue(); 10075 SDLoc dl(N); 10076 SmallVector<SDValue, 8> Ops; 10077 unsigned NumElts = VT.getVectorNumElements(); 10078 for (unsigned i = 0; i < NumElts; ++i) { 10079 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 10080 Ops.push_back(V); 10081 // Make the DAGCombiner fold the bitcast. 10082 DCI.AddToWorklist(V.getNode()); 10083 } 10084 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 10085 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops); 10086 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 10087 } 10088 10089 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR. 10090 static SDValue 10091 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 10092 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR. 10093 // At that time, we may have inserted bitcasts from integer to float. 10094 // If these bitcasts have survived DAGCombine, change the lowering of this 10095 // BUILD_VECTOR in something more vector friendly, i.e., that does not 10096 // force to use floating point types. 10097 10098 // Make sure we can change the type of the vector. 10099 // This is possible iff: 10100 // 1. The vector is only used in a bitcast to a integer type. I.e., 10101 // 1.1. Vector is used only once. 10102 // 1.2. Use is a bit convert to an integer type. 10103 // 2. The size of its operands are 32-bits (64-bits are not legal). 10104 EVT VT = N->getValueType(0); 10105 EVT EltVT = VT.getVectorElementType(); 10106 10107 // Check 1.1. and 2. 10108 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse()) 10109 return SDValue(); 10110 10111 // By construction, the input type must be float. 10112 assert(EltVT == MVT::f32 && "Unexpected type!"); 10113 10114 // Check 1.2. 10115 SDNode *Use = *N->use_begin(); 10116 if (Use->getOpcode() != ISD::BITCAST || 10117 Use->getValueType(0).isFloatingPoint()) 10118 return SDValue(); 10119 10120 // Check profitability. 10121 // Model is, if more than half of the relevant operands are bitcast from 10122 // i32, turn the build_vector into a sequence of insert_vector_elt. 10123 // Relevant operands are everything that is not statically 10124 // (i.e., at compile time) bitcasted. 10125 unsigned NumOfBitCastedElts = 0; 10126 unsigned NumElts = VT.getVectorNumElements(); 10127 unsigned NumOfRelevantElts = NumElts; 10128 for (unsigned Idx = 0; Idx < NumElts; ++Idx) { 10129 SDValue Elt = N->getOperand(Idx); 10130 if (Elt->getOpcode() == ISD::BITCAST) { 10131 // Assume only bit cast to i32 will go away. 10132 if (Elt->getOperand(0).getValueType() == MVT::i32) 10133 ++NumOfBitCastedElts; 10134 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt)) 10135 // Constants are statically casted, thus do not count them as 10136 // relevant operands. 10137 --NumOfRelevantElts; 10138 } 10139 10140 // Check if more than half of the elements require a non-free bitcast. 10141 if (NumOfBitCastedElts <= NumOfRelevantElts / 2) 10142 return SDValue(); 10143 10144 SelectionDAG &DAG = DCI.DAG; 10145 // Create the new vector type. 10146 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 10147 // Check if the type is legal. 10148 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10149 if (!TLI.isTypeLegal(VecVT)) 10150 return SDValue(); 10151 10152 // Combine: 10153 // ARMISD::BUILD_VECTOR E1, E2, ..., EN. 
10154 // => BITCAST INSERT_VECTOR_ELT 10155 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), 10156 // (BITCAST EN), N. 10157 SDValue Vec = DAG.getUNDEF(VecVT); 10158 SDLoc dl(N); 10159 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { 10160 SDValue V = N->getOperand(Idx); 10161 if (V.isUndef()) 10162 continue; 10163 if (V.getOpcode() == ISD::BITCAST && 10164 V->getOperand(0).getValueType() == MVT::i32) 10165 // Fold obvious case. 10166 V = V.getOperand(0); 10167 else { 10168 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); 10169 // Make the DAGCombiner fold the bitcasts. 10170 DCI.AddToWorklist(V.getNode()); 10171 } 10172 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32); 10173 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); 10174 } 10175 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); 10176 // Make the DAGCombiner fold the bitcasts. 10177 DCI.AddToWorklist(Vec.getNode()); 10178 return Vec; 10179 } 10180 10181 /// PerformInsertEltCombine - Target-specific dag combine xforms for 10182 /// ISD::INSERT_VECTOR_ELT. 10183 static SDValue PerformInsertEltCombine(SDNode *N, 10184 TargetLowering::DAGCombinerInfo &DCI) { 10185 // Bitcast an i64 load inserted into a vector to f64. 10186 // Otherwise, the i64 value will be legalized to a pair of i32 values. 10187 EVT VT = N->getValueType(0); 10188 SDNode *Elt = N->getOperand(1).getNode(); 10189 if (VT.getVectorElementType() != MVT::i64 || 10190 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 10191 return SDValue(); 10192 10193 SelectionDAG &DAG = DCI.DAG; 10194 SDLoc dl(N); 10195 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 10196 VT.getVectorNumElements()); 10197 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 10198 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 10199 // Make the DAGCombiner fold the bitcasts. 10200 DCI.AddToWorklist(Vec.getNode()); 10201 DCI.AddToWorklist(V.getNode()); 10202 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 10203 Vec, V, N->getOperand(2)); 10204 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 10205 } 10206 10207 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 10208 /// ISD::VECTOR_SHUFFLE. 10209 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 10210 // The LLVM shufflevector instruction does not require the shuffle mask 10211 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 10212 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 10213 // operands do not match the mask length, they are extended by concatenating 10214 // them with undef vectors. That is probably the right thing for other 10215 // targets, but for NEON it is better to concatenate two double-register 10216 // size vector operands into a single quad-register size vector. Do that 10217 // transformation here: 10218 // shuffle(concat(v1, undef), concat(v2, undef)) -> 10219 // shuffle(concat(v1, v2), undef) 10220 SDValue Op0 = N->getOperand(0); 10221 SDValue Op1 = N->getOperand(1); 10222 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 10223 Op1.getOpcode() != ISD::CONCAT_VECTORS || 10224 Op0.getNumOperands() != 2 || 10225 Op1.getNumOperands() != 2) 10226 return SDValue(); 10227 SDValue Concat0Op1 = Op0.getOperand(1); 10228 SDValue Concat1Op1 = Op1.getOperand(1); 10229 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef()) 10230 return SDValue(); 10231 // Skip the transformation if any of the types are illegal. 
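  // Illustrative instance (types assumed): a v4f32 shuffle
  //   shuffle(concat(v2f32 A, undef), concat(v2f32 B, undef), <0,1,4,5>)
  // is rewritten below into
  //   shuffle(concat(A, B), undef, <0,1,2,3>)
  // keeping both double-register halves in a single quad register.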
10232 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10233 EVT VT = N->getValueType(0); 10234 if (!TLI.isTypeLegal(VT) || 10235 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 10236 !TLI.isTypeLegal(Concat1Op1.getValueType())) 10237 return SDValue(); 10238 10239 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, 10240 Op0.getOperand(0), Op1.getOperand(0)); 10241 // Translate the shuffle mask. 10242 SmallVector<int, 16> NewMask; 10243 unsigned NumElts = VT.getVectorNumElements(); 10244 unsigned HalfElts = NumElts/2; 10245 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 10246 for (unsigned n = 0; n < NumElts; ++n) { 10247 int MaskElt = SVN->getMaskElt(n); 10248 int NewElt = -1; 10249 if (MaskElt < (int)HalfElts) 10250 NewElt = MaskElt; 10251 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 10252 NewElt = HalfElts + MaskElt - NumElts; 10253 NewMask.push_back(NewElt); 10254 } 10255 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, 10256 DAG.getUNDEF(VT), NewMask); 10257 } 10258 10259 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, 10260 /// NEON load/store intrinsics, and generic vector load/stores, to merge 10261 /// base address updates. 10262 /// For generic load/stores, the memory type is assumed to be a vector. 10263 /// The caller is assumed to have checked legality. 10264 static SDValue CombineBaseUpdate(SDNode *N, 10265 TargetLowering::DAGCombinerInfo &DCI) { 10266 SelectionDAG &DAG = DCI.DAG; 10267 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 10268 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 10269 const bool isStore = N->getOpcode() == ISD::STORE; 10270 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); 10271 SDValue Addr = N->getOperand(AddrOpIdx); 10272 MemSDNode *MemN = cast<MemSDNode>(N); 10273 SDLoc dl(N); 10274 10275 // Search for a use of the address operand that is an increment. 10276 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 10277 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 10278 SDNode *User = *UI; 10279 if (User->getOpcode() != ISD::ADD || 10280 UI.getUse().getResNo() != Addr.getResNo()) 10281 continue; 10282 10283 // Check that the add is independent of the load/store. Otherwise, folding 10284 // it would create a cycle. 10285 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 10286 continue; 10287 10288 // Find the new opcode for the updating load/store. 
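    // Illustrative target of this combine (operands assumed): a plain v4i32
    // load whose address also feeds (add Addr, 16) becomes a VLD1_UPD node,
    // which later selects to a post-indexed vld1 that writes the incremented
    // address back to the base register.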
10289 bool isLoadOp = true; 10290 bool isLaneOp = false; 10291 unsigned NewOpc = 0; 10292 unsigned NumVecs = 0; 10293 if (isIntrinsic) { 10294 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 10295 switch (IntNo) { 10296 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 10297 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 10298 NumVecs = 1; break; 10299 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 10300 NumVecs = 2; break; 10301 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 10302 NumVecs = 3; break; 10303 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 10304 NumVecs = 4; break; 10305 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 10306 NumVecs = 2; isLaneOp = true; break; 10307 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 10308 NumVecs = 3; isLaneOp = true; break; 10309 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 10310 NumVecs = 4; isLaneOp = true; break; 10311 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 10312 NumVecs = 1; isLoadOp = false; break; 10313 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 10314 NumVecs = 2; isLoadOp = false; break; 10315 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 10316 NumVecs = 3; isLoadOp = false; break; 10317 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 10318 NumVecs = 4; isLoadOp = false; break; 10319 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 10320 NumVecs = 2; isLoadOp = false; isLaneOp = true; break; 10321 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 10322 NumVecs = 3; isLoadOp = false; isLaneOp = true; break; 10323 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 10324 NumVecs = 4; isLoadOp = false; isLaneOp = true; break; 10325 } 10326 } else { 10327 isLaneOp = true; 10328 switch (N->getOpcode()) { 10329 default: llvm_unreachable("unexpected opcode for Neon base update"); 10330 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 10331 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 10332 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 10333 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD; 10334 NumVecs = 1; isLaneOp = false; break; 10335 case ISD::STORE: NewOpc = ARMISD::VST1_UPD; 10336 NumVecs = 1; isLaneOp = false; isLoadOp = false; break; 10337 } 10338 } 10339 10340 // Find the size of memory referenced by the load/store. 10341 EVT VecTy; 10342 if (isLoadOp) { 10343 VecTy = N->getValueType(0); 10344 } else if (isIntrinsic) { 10345 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 10346 } else { 10347 assert(isStore && "Node has to be a load, a store, or an intrinsic!"); 10348 VecTy = N->getOperand(1).getValueType(); 10349 } 10350 10351 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 10352 if (isLaneOp) 10353 NumBytes /= VecTy.getVectorNumElements(); 10354 10355 // If the increment is a constant, it must match the memory ref size. 10356 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 10357 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 10358 uint64_t IncVal = CInc->getZExtValue(); 10359 if (IncVal != NumBytes) 10360 continue; 10361 } else if (NumBytes >= 3 * 16) { 10362 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 10363 // separate instructions that make it harder to use a non-constant update. 
10364 continue;
10365 }
10366
10367 // OK, we found an ADD we can fold into the base update.
10368 // Now, create a _UPD node, taking care of not breaking alignment.
10369
10370 EVT AlignedVecTy = VecTy;
10371 unsigned Alignment = MemN->getAlignment();
10372
10373 // If this is a less-than-standard-aligned load/store, change the type to
10374 // match the standard alignment.
10375 // The alignment is overlooked when selecting _UPD variants; and it's
10376 // easier to introduce bitcasts here than fix that.
10377 // There are 3 ways to get to this base-update combine:
10378 // - intrinsics: they are assumed to be properly aligned (to the standard
10379 // alignment of the memory type), so we don't need to do anything.
10380 // - ARMISD::VLDx nodes: they are only generated from the aforementioned
10381 // intrinsics, so, likewise, there's nothing to do.
10382 // - generic load/store instructions: the alignment is specified as an
10383 // explicit operand, rather than implicitly as the standard alignment
10384 // of the memory type (like the intrinsics). We need to change the
10385 // memory type to match the explicit alignment. That way, we don't
10386 // generate non-standard-aligned ARMISD::VLDx nodes.
10387 if (isa<LSBaseSDNode>(N)) {
10388 if (Alignment == 0)
10389 Alignment = 1;
10390 if (Alignment < VecTy.getScalarSizeInBits() / 8) {
10391 MVT EltTy = MVT::getIntegerVT(Alignment * 8);
10392 assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
10393 assert(!isLaneOp && "Unexpected generic load/store lane.");
10394 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
10395 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
10396 }
10397 // Don't set an explicit alignment on regular load/stores that we want
10398 // to transform to VLD/VST 1_UPD nodes.
10399 // This matches the behavior of regular load/stores, which only get an
10400 // explicit alignment if the MMO alignment is larger than the standard
10401 // alignment of the memory type.
10402 // Intrinsics, however, always get an explicit alignment, set to the
10403 // alignment of the MMO.
10404 Alignment = 1;
10405 }
10406
10407 // Create the new updating load/store node.
10408 // First, create an SDVTList for the new updating node's results.
10409 EVT Tys[6];
10410 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
10411 unsigned n;
10412 for (n = 0; n < NumResultVecs; ++n)
10413 Tys[n] = AlignedVecTy;
10414 Tys[n++] = MVT::i32;
10415 Tys[n] = MVT::Other;
10416 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
10417
10418 // Then, gather the new node's operands.
10419 SmallVector<SDValue, 8> Ops;
10420 Ops.push_back(N->getOperand(0)); // incoming chain
10421 Ops.push_back(N->getOperand(AddrOpIdx));
10422 Ops.push_back(Inc);
10423
10424 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
10425 // Try to match the intrinsic's signature.
10426 Ops.push_back(StN->getValue());
10427 } else {
10428 // Loads (and of course intrinsics) match the intrinsics' signature,
10429 // so just add all but the alignment operand.
10430 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
10431 Ops.push_back(N->getOperand(i));
10432 }
10433
10434 // For all node types, the alignment operand is always the last one.
10435 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
10436
10437 // If this is a non-standard-aligned STORE, the penultimate operand is the
10438 // stored value. Bitcast it to the aligned type.
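// For example (an illustrative sketch): a 4-byte-aligned store of v2i64 gets
// AlignedVecTy = v4i32 above, so the stored value is bitcast to v4i32 here
// and the combine emits a v4i32 VST1_UPD instead of an under-aligned v2i64
// access.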
10439 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
10440 SDValue &StVal = Ops[Ops.size()-2];
10441 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
10442 }
10443
10444 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys,
10445 Ops, AlignedVecTy,
10446 MemN->getMemOperand());
10447
10448 // Update the uses.
10449 SmallVector<SDValue, 5> NewResults;
10450 for (unsigned i = 0; i < NumResultVecs; ++i)
10451 NewResults.push_back(SDValue(UpdN.getNode(), i));
10452
10453 // If this is a non-standard-aligned LOAD, the first result is the loaded
10454 // value. Bitcast it to the expected result type.
10455 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
10456 SDValue &LdVal = NewResults[0];
10457 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
10458 }
10459
10460 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
10461 DCI.CombineTo(N, NewResults);
10462 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
10463
10464 break;
10465 }
10466 return SDValue();
10467 }
10468
10469 static SDValue PerformVLDCombine(SDNode *N,
10470 TargetLowering::DAGCombinerInfo &DCI) {
10471 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
10472 return SDValue();
10473
10474 return CombineBaseUpdate(N, DCI);
10475 }
10476
10477 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
10478 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
10479 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
10480 /// return true.
10481 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10482 SelectionDAG &DAG = DCI.DAG;
10483 EVT VT = N->getValueType(0);
10484 // vldN-dup instructions only support 64-bit vectors for N > 1.
10485 if (!VT.is64BitVector())
10486 return false;
10487
10488 // Check if the VDUPLANE operand is a vldN-lane intrinsic.
10489 SDNode *VLD = N->getOperand(0).getNode();
10490 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
10491 return false;
10492 unsigned NumVecs = 0;
10493 unsigned NewOpc = 0;
10494 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
10495 if (IntNo == Intrinsic::arm_neon_vld2lane) {
10496 NumVecs = 2;
10497 NewOpc = ARMISD::VLD2DUP;
10498 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
10499 NumVecs = 3;
10500 NewOpc = ARMISD::VLD3DUP;
10501 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
10502 NumVecs = 4;
10503 NewOpc = ARMISD::VLD4DUP;
10504 } else {
10505 return false;
10506 }
10507
10508 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
10509 // numbers match the load.
10510 unsigned VLDLaneNo =
10511 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
10512 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
10513 UI != UE; ++UI) {
10514 // Ignore uses of the chain result.
10515 if (UI.getUse().getResNo() == NumVecs)
10516 continue;
10517 SDNode *User = *UI;
10518 if (User->getOpcode() != ARMISD::VDUPLANE ||
10519 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
10520 return false;
10521 }
10522
10523 // Create the vldN-dup node.
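// Illustrative sketch of the overall rewrite: if both results of a vld2lane
// intrinsic only feed VDUPLANE nodes using the same lane as the load, the
// group is replaced by a single all-lanes load such as
//   vld2.16 {d16[], d17[]}, [r0]
// (ARMISD::VLD2DUP); the code below builds that node.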
10524 EVT Tys[5]; 10525 unsigned n; 10526 for (n = 0; n < NumVecs; ++n) 10527 Tys[n] = VT; 10528 Tys[n] = MVT::Other; 10529 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); 10530 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 10531 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 10532 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, 10533 Ops, VLDMemInt->getMemoryVT(), 10534 VLDMemInt->getMemOperand()); 10535 10536 // Update the uses. 10537 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 10538 UI != UE; ++UI) { 10539 unsigned ResNo = UI.getUse().getResNo(); 10540 // Ignore uses of the chain result. 10541 if (ResNo == NumVecs) 10542 continue; 10543 SDNode *User = *UI; 10544 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 10545 } 10546 10547 // Now the vldN-lane intrinsic is dead except for its chain result. 10548 // Update uses of the chain. 10549 std::vector<SDValue> VLDDupResults; 10550 for (unsigned n = 0; n < NumVecs; ++n) 10551 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 10552 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 10553 DCI.CombineTo(VLD, VLDDupResults); 10554 10555 return true; 10556 } 10557 10558 /// PerformVDUPLANECombine - Target-specific dag combine xforms for 10559 /// ARMISD::VDUPLANE. 10560 static SDValue PerformVDUPLANECombine(SDNode *N, 10561 TargetLowering::DAGCombinerInfo &DCI) { 10562 SDValue Op = N->getOperand(0); 10563 10564 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 10565 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 10566 if (CombineVLDDUP(N, DCI)) 10567 return SDValue(N, 0); 10568 10569 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 10570 // redundant. Ignore bit_converts for now; element sizes are checked below. 10571 while (Op.getOpcode() == ISD::BITCAST) 10572 Op = Op.getOperand(0); 10573 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 10574 return SDValue(); 10575 10576 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 10577 unsigned EltSize = Op.getScalarValueSizeInBits(); 10578 // The canonical VMOV for a zero vector uses a 32-bit element size. 10579 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 10580 unsigned EltBits; 10581 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 10582 EltSize = 8; 10583 EVT VT = N->getValueType(0); 10584 if (EltSize > VT.getScalarSizeInBits()) 10585 return SDValue(); 10586 10587 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 10588 } 10589 10590 static SDValue PerformLOADCombine(SDNode *N, 10591 TargetLowering::DAGCombinerInfo &DCI) { 10592 EVT VT = N->getValueType(0); 10593 10594 // If this is a legal vector load, try to combine it into a VLD1_UPD. 10595 if (ISD::isNormalLoad(N) && VT.isVector() && 10596 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10597 return CombineBaseUpdate(N, DCI); 10598 10599 return SDValue(); 10600 } 10601 10602 /// PerformSTORECombine - Target-specific dag combine xforms for 10603 /// ISD::STORE. 10604 static SDValue PerformSTORECombine(SDNode *N, 10605 TargetLowering::DAGCombinerInfo &DCI) { 10606 StoreSDNode *St = cast<StoreSDNode>(N); 10607 if (St->isVolatile()) 10608 return SDValue(); 10609 10610 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 10611 // pack all of the elements in one place. Next, store to memory in fewer 10612 // chunks. 
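// For example (an illustrative sketch, assuming the types involved are
// legal): a truncating store of v4i32 to v4i8 is bitcast to v16i8 and
// shuffled so that the low byte of each i32 element (source lanes
// <0,4,8,12> on little-endian) is packed into the bottom four lanes; the
// packed 32 bits are then written out with a single i32 store.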
10613 SDValue StVal = St->getValue(); 10614 EVT VT = StVal.getValueType(); 10615 if (St->isTruncatingStore() && VT.isVector()) { 10616 SelectionDAG &DAG = DCI.DAG; 10617 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10618 EVT StVT = St->getMemoryVT(); 10619 unsigned NumElems = VT.getVectorNumElements(); 10620 assert(StVT != VT && "Cannot truncate to the same type"); 10621 unsigned FromEltSz = VT.getScalarSizeInBits(); 10622 unsigned ToEltSz = StVT.getScalarSizeInBits(); 10623 10624 // From, To sizes and ElemCount must be pow of two 10625 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); 10626 10627 // We are going to use the original vector elt for storing. 10628 // Accumulated smaller vector elements must be a multiple of the store size. 10629 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); 10630 10631 unsigned SizeRatio = FromEltSz / ToEltSz; 10632 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 10633 10634 // Create a type on which we perform the shuffle. 10635 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 10636 NumElems*SizeRatio); 10637 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 10638 10639 SDLoc DL(St); 10640 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 10641 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 10642 for (unsigned i = 0; i < NumElems; ++i) 10643 ShuffleVec[i] = DAG.getDataLayout().isBigEndian() 10644 ? (i + 1) * SizeRatio - 1 10645 : i * SizeRatio; 10646 10647 // Can't shuffle using an illegal type. 10648 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 10649 10650 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, 10651 DAG.getUNDEF(WideVec.getValueType()), 10652 ShuffleVec); 10653 // At this point all of the data is stored at the bottom of the 10654 // register. We now need to save it to mem. 10655 10656 // Find the largest store unit 10657 MVT StoreType = MVT::i8; 10658 for (MVT Tp : MVT::integer_valuetypes()) { 10659 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 10660 StoreType = Tp; 10661 } 10662 // Didn't find a legal store type. 10663 if (!TLI.isTypeLegal(StoreType)) 10664 return SDValue(); 10665 10666 // Bitcast the original vector into a vector of store-size units 10667 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 10668 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 10669 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 10670 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 10671 SmallVector<SDValue, 8> Chains; 10672 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL, 10673 TLI.getPointerTy(DAG.getDataLayout())); 10674 SDValue BasePtr = St->getBasePtr(); 10675 10676 // Perform one or more big stores into memory. 
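// Sketch of the chunking (illustrative, assuming i32 is the widest legal
// integer store unit here): for a v8i16-to-v8i8 truncating store the packed
// data occupies 64 bits, so this loop emits two i32 stores, at BasePtr and
// BasePtr + 4.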
10677 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); 10678 for (unsigned I = 0; I < E; I++) { 10679 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 10680 StoreType, ShuffWide, 10681 DAG.getIntPtrConstant(I, DL)); 10682 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, 10683 St->getPointerInfo(), St->getAlignment(), 10684 St->getMemOperand()->getFlags()); 10685 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 10686 Increment); 10687 Chains.push_back(Ch); 10688 } 10689 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 10690 } 10691 10692 if (!ISD::isNormalStore(St)) 10693 return SDValue(); 10694 10695 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 10696 // ARM stores of arguments in the same cache line. 10697 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 10698 StVal.getNode()->hasOneUse()) { 10699 SelectionDAG &DAG = DCI.DAG; 10700 bool isBigEndian = DAG.getDataLayout().isBigEndian(); 10701 SDLoc DL(St); 10702 SDValue BasePtr = St->getBasePtr(); 10703 SDValue NewST1 = DAG.getStore( 10704 St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0), 10705 BasePtr, St->getPointerInfo(), St->getAlignment(), 10706 St->getMemOperand()->getFlags()); 10707 10708 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 10709 DAG.getConstant(4, DL, MVT::i32)); 10710 return DAG.getStore(NewST1.getValue(0), DL, 10711 StVal.getNode()->getOperand(isBigEndian ? 0 : 1), 10712 OffsetPtr, St->getPointerInfo(), 10713 std::min(4U, St->getAlignment() / 2), 10714 St->getMemOperand()->getFlags()); 10715 } 10716 10717 if (StVal.getValueType() == MVT::i64 && 10718 StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 10719 10720 // Bitcast an i64 store extracted from a vector to f64. 10721 // Otherwise, the i64 value will be legalized to a pair of i32 values. 10722 SelectionDAG &DAG = DCI.DAG; 10723 SDLoc dl(StVal); 10724 SDValue IntVec = StVal.getOperand(0); 10725 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 10726 IntVec.getValueType().getVectorNumElements()); 10727 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 10728 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 10729 Vec, StVal.getOperand(1)); 10730 dl = SDLoc(N); 10731 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 10732 // Make the DAGCombiner fold the bitcasts. 10733 DCI.AddToWorklist(Vec.getNode()); 10734 DCI.AddToWorklist(ExtElt.getNode()); 10735 DCI.AddToWorklist(V.getNode()); 10736 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 10737 St->getPointerInfo(), St->getAlignment(), 10738 St->getMemOperand()->getFlags(), St->getAAInfo()); 10739 } 10740 10741 // If this is a legal vector store, try to combine it into a VST1_UPD. 10742 if (ISD::isNormalStore(N) && VT.isVector() && 10743 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT)) 10744 return CombineBaseUpdate(N, DCI); 10745 10746 return SDValue(); 10747 } 10748 10749 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 10750 /// can replace combinations of VMUL and VCVT (floating-point to integer) 10751 /// when the VMUL has a constant operand that is a power of 2. 
10752 ///
10753 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
10754 /// vmul.f32 d16, d17, d16
10755 /// vcvt.s32.f32 d16, d16
10756 /// becomes:
10757 /// vcvt.s32.f32 d16, d16, #3
10758 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
10759 const ARMSubtarget *Subtarget) {
10760 if (!Subtarget->hasNEON())
10761 return SDValue();
10762
10763 SDValue Op = N->getOperand(0);
10764 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
10765 Op.getOpcode() != ISD::FMUL)
10766 return SDValue();
10767
10768 SDValue ConstVec = Op->getOperand(1);
10769 if (!isa<BuildVectorSDNode>(ConstVec))
10770 return SDValue();
10771
10772 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
10773 uint32_t FloatBits = FloatTy.getSizeInBits();
10774 MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
10775 uint32_t IntBits = IntTy.getSizeInBits();
10776 unsigned NumLanes = Op.getValueType().getVectorNumElements();
10777 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
10778 // These instructions only exist converting from f32 to i32. We can handle
10779 // smaller integers by generating an extra truncate, but larger ones would
10780 // be lossy. We also can't handle more than 4 lanes, since these instructions
10781 // only support v2i32/v4i32 types.
10782 return SDValue();
10783 }
10784
10785 BitVector UndefElements;
10786 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
10787 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
10788 if (C == -1 || C == 0 || C > 32)
10789 return SDValue();
10790
10791 SDLoc dl(N);
10792 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
10793 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
10794 Intrinsic::arm_neon_vcvtfp2fxu;
10795 SDValue FixConv = DAG.getNode(
10796 ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
10797 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
10798 DAG.getConstant(C, dl, MVT::i32));
10799
10800 if (IntBits < FloatBits)
10801 FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
10802
10803 return FixConv;
10804 }
10805
10806 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
10807 /// can replace combinations of VCVT (integer to floating-point) and VDIV
10808 /// when the VDIV has a constant operand that is a power of 2.
10809 ///
10810 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
10811 /// vcvt.f32.s32 d16, d16
10812 /// vdiv.f32 d16, d17, d16
10813 /// becomes:
10814 /// vcvt.f32.s32 d16, d16, #3
10815 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
10816 const ARMSubtarget *Subtarget) {
10817 if (!Subtarget->hasNEON())
10818 return SDValue();
10819
10820 SDValue Op = N->getOperand(0);
10821 unsigned OpOpcode = Op.getNode()->getOpcode();
10822 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
10823 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
10824 return SDValue();
10825
10826 SDValue ConstVec = N->getOperand(1);
10827 if (!isa<BuildVectorSDNode>(ConstVec))
10828 return SDValue();
10829
10830 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
10831 uint32_t FloatBits = FloatTy.getSizeInBits();
10832 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
10833 uint32_t IntBits = IntTy.getSizeInBits();
10834 unsigned NumLanes = Op.getValueType().getVectorNumElements();
10835 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
10836 // These instructions only exist converting from i32 to f32. We can handle
10837 // smaller integers by generating an extra extend, but larger ones would
10838 // be lossy. We also can't handle more than 4 lanes, since these instructions
10839 // only support v2i32/v4i32 types.
10840 return SDValue();
10841 }
10842
10843 BitVector UndefElements;
10844 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
10845 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
10846 if (C == -1 || C == 0 || C > 32)
10847 return SDValue();
10848
10849 SDLoc dl(N);
10850 bool isSigned = OpOpcode == ISD::SINT_TO_FP;
10851 SDValue ConvInput = Op.getOperand(0);
10852 if (IntBits < FloatBits)
10853 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
10854 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
10855 ConvInput);
10856
10857 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
10858 Intrinsic::arm_neon_vcvtfxu2fp;
10859 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
10860 Op.getValueType(),
10861 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
10862 ConvInput, DAG.getConstant(C, dl, MVT::i32));
10863 }
10864
10865 /// getVShiftImm - Check if this is a valid build_vector for the immediate
10866 /// operand of a vector shift operation, where all the elements of the
10867 /// build_vector must have the same constant integer value.
10868 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
10869 // Ignore bit_converts.
10870 while (Op.getOpcode() == ISD::BITCAST)
10871 Op = Op.getOperand(0);
10872 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
10873 APInt SplatBits, SplatUndef;
10874 unsigned SplatBitSize;
10875 bool HasAnyUndefs;
10876 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
10877 HasAnyUndefs, ElementBits) ||
10878 SplatBitSize > ElementBits)
10879 return false;
10880 Cnt = SplatBits.getSExtValue();
10881 return true;
10882 }
10883
10884 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
10885 /// operand of a vector shift left operation. That value must be in the range:
10886 /// 0 <= Value < ElementBits for a left shift; or
10887 /// 0 <= Value <= ElementBits for a long left shift.
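/// For example (a sketch): with v8i16 operands a plain immediate shift left
/// must use a count in [0,15], while a long shift (e.g. vshll) also allows
/// a count of 16.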
10888 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 10889 assert(VT.isVector() && "vector shift count is not a vector type"); 10890 int64_t ElementBits = VT.getScalarSizeInBits(); 10891 if (! getVShiftImm(Op, ElementBits, Cnt)) 10892 return false; 10893 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 10894 } 10895 10896 /// isVShiftRImm - Check if this is a valid build_vector for the immediate 10897 /// operand of a vector shift right operation. For a shift opcode, the value 10898 /// is positive, but for an intrinsic the value count must be negative. The 10899 /// absolute value must be in the range: 10900 /// 1 <= |Value| <= ElementBits for a right shift; or 10901 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 10902 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 10903 int64_t &Cnt) { 10904 assert(VT.isVector() && "vector shift count is not a vector type"); 10905 int64_t ElementBits = VT.getScalarSizeInBits(); 10906 if (! getVShiftImm(Op, ElementBits, Cnt)) 10907 return false; 10908 if (!isIntrinsic) 10909 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 10910 if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) { 10911 Cnt = -Cnt; 10912 return true; 10913 } 10914 return false; 10915 } 10916 10917 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 10918 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 10919 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 10920 switch (IntNo) { 10921 default: 10922 // Don't do anything for most intrinsics. 10923 break; 10924 10925 // Vector shifts: check for immediate versions and lower them. 10926 // Note: This is done during DAG combining instead of DAG legalizing because 10927 // the build_vectors for 64-bit vector element shift counts are generally 10928 // not legal, and it is hard to see their values after they get legalized to 10929 // loads from a constant pool. 10930 case Intrinsic::arm_neon_vshifts: 10931 case Intrinsic::arm_neon_vshiftu: 10932 case Intrinsic::arm_neon_vrshifts: 10933 case Intrinsic::arm_neon_vrshiftu: 10934 case Intrinsic::arm_neon_vrshiftn: 10935 case Intrinsic::arm_neon_vqshifts: 10936 case Intrinsic::arm_neon_vqshiftu: 10937 case Intrinsic::arm_neon_vqshiftsu: 10938 case Intrinsic::arm_neon_vqshiftns: 10939 case Intrinsic::arm_neon_vqshiftnu: 10940 case Intrinsic::arm_neon_vqshiftnsu: 10941 case Intrinsic::arm_neon_vqrshiftns: 10942 case Intrinsic::arm_neon_vqrshiftnu: 10943 case Intrinsic::arm_neon_vqrshiftnsu: { 10944 EVT VT = N->getOperand(1).getValueType(); 10945 int64_t Cnt; 10946 unsigned VShiftOpc = 0; 10947 10948 switch (IntNo) { 10949 case Intrinsic::arm_neon_vshifts: 10950 case Intrinsic::arm_neon_vshiftu: 10951 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 10952 VShiftOpc = ARMISD::VSHL; 10953 break; 10954 } 10955 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 10956 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
10957 ARMISD::VSHRs : ARMISD::VSHRu); 10958 break; 10959 } 10960 return SDValue(); 10961 10962 case Intrinsic::arm_neon_vrshifts: 10963 case Intrinsic::arm_neon_vrshiftu: 10964 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 10965 break; 10966 return SDValue(); 10967 10968 case Intrinsic::arm_neon_vqshifts: 10969 case Intrinsic::arm_neon_vqshiftu: 10970 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 10971 break; 10972 return SDValue(); 10973 10974 case Intrinsic::arm_neon_vqshiftsu: 10975 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 10976 break; 10977 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 10978 10979 case Intrinsic::arm_neon_vrshiftn: 10980 case Intrinsic::arm_neon_vqshiftns: 10981 case Intrinsic::arm_neon_vqshiftnu: 10982 case Intrinsic::arm_neon_vqshiftnsu: 10983 case Intrinsic::arm_neon_vqrshiftns: 10984 case Intrinsic::arm_neon_vqrshiftnu: 10985 case Intrinsic::arm_neon_vqrshiftnsu: 10986 // Narrowing shifts require an immediate right shift. 10987 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 10988 break; 10989 llvm_unreachable("invalid shift count for narrowing vector shift " 10990 "intrinsic"); 10991 10992 default: 10993 llvm_unreachable("unhandled vector shift"); 10994 } 10995 10996 switch (IntNo) { 10997 case Intrinsic::arm_neon_vshifts: 10998 case Intrinsic::arm_neon_vshiftu: 10999 // Opcode already set above. 11000 break; 11001 case Intrinsic::arm_neon_vrshifts: 11002 VShiftOpc = ARMISD::VRSHRs; break; 11003 case Intrinsic::arm_neon_vrshiftu: 11004 VShiftOpc = ARMISD::VRSHRu; break; 11005 case Intrinsic::arm_neon_vrshiftn: 11006 VShiftOpc = ARMISD::VRSHRN; break; 11007 case Intrinsic::arm_neon_vqshifts: 11008 VShiftOpc = ARMISD::VQSHLs; break; 11009 case Intrinsic::arm_neon_vqshiftu: 11010 VShiftOpc = ARMISD::VQSHLu; break; 11011 case Intrinsic::arm_neon_vqshiftsu: 11012 VShiftOpc = ARMISD::VQSHLsu; break; 11013 case Intrinsic::arm_neon_vqshiftns: 11014 VShiftOpc = ARMISD::VQSHRNs; break; 11015 case Intrinsic::arm_neon_vqshiftnu: 11016 VShiftOpc = ARMISD::VQSHRNu; break; 11017 case Intrinsic::arm_neon_vqshiftnsu: 11018 VShiftOpc = ARMISD::VQSHRNsu; break; 11019 case Intrinsic::arm_neon_vqrshiftns: 11020 VShiftOpc = ARMISD::VQRSHRNs; break; 11021 case Intrinsic::arm_neon_vqrshiftnu: 11022 VShiftOpc = ARMISD::VQRSHRNu; break; 11023 case Intrinsic::arm_neon_vqrshiftnsu: 11024 VShiftOpc = ARMISD::VQRSHRNsu; break; 11025 } 11026 11027 SDLoc dl(N); 11028 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 11029 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32)); 11030 } 11031 11032 case Intrinsic::arm_neon_vshiftins: { 11033 EVT VT = N->getOperand(1).getValueType(); 11034 int64_t Cnt; 11035 unsigned VShiftOpc = 0; 11036 11037 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 11038 VShiftOpc = ARMISD::VSLI; 11039 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 11040 VShiftOpc = ARMISD::VSRI; 11041 else { 11042 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 11043 } 11044 11045 SDLoc dl(N); 11046 return DAG.getNode(VShiftOpc, dl, N->getValueType(0), 11047 N->getOperand(1), N->getOperand(2), 11048 DAG.getConstant(Cnt, dl, MVT::i32)); 11049 } 11050 11051 case Intrinsic::arm_neon_vqrshifts: 11052 case Intrinsic::arm_neon_vqrshiftu: 11053 // No immediate versions of these to check for. 11054 break; 11055 } 11056 11057 return SDValue(); 11058 } 11059 11060 /// PerformShiftCombine - Checks for immediate versions of vector shifts and 11061 /// lowers them. 
As with the vector shift intrinsics, this is done during DAG 11062 /// combining instead of DAG legalizing because the build_vectors for 64-bit 11063 /// vector element shift counts are generally not legal, and it is hard to see 11064 /// their values after they get legalized to loads from a constant pool. 11065 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 11066 const ARMSubtarget *ST) { 11067 EVT VT = N->getValueType(0); 11068 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 11069 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 11070 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. 11071 SDValue N1 = N->getOperand(1); 11072 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 11073 SDValue N0 = N->getOperand(0); 11074 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 11075 DAG.MaskedValueIsZero(N0.getOperand(0), 11076 APInt::getHighBitsSet(32, 16))) 11077 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); 11078 } 11079 } 11080 11081 // Nothing to be done for scalar shifts. 11082 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11083 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 11084 return SDValue(); 11085 11086 assert(ST->hasNEON() && "unexpected vector shift"); 11087 int64_t Cnt; 11088 11089 switch (N->getOpcode()) { 11090 default: llvm_unreachable("unexpected shift opcode"); 11091 11092 case ISD::SHL: 11093 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) { 11094 SDLoc dl(N); 11095 return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0), 11096 DAG.getConstant(Cnt, dl, MVT::i32)); 11097 } 11098 break; 11099 11100 case ISD::SRA: 11101 case ISD::SRL: 11102 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 11103 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 11104 ARMISD::VSHRs : ARMISD::VSHRu); 11105 SDLoc dl(N); 11106 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), 11107 DAG.getConstant(Cnt, dl, MVT::i32)); 11108 } 11109 } 11110 return SDValue(); 11111 } 11112 11113 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 11114 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 11115 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 11116 const ARMSubtarget *ST) { 11117 SDValue N0 = N->getOperand(0); 11118 11119 // Check for sign- and zero-extensions of vector extract operations of 8- 11120 // and 16-bit vector elements. NEON supports these directly. They are 11121 // handled during DAG combining because type legalization will promote them 11122 // to 32-bit types and it is messy to recognize the operations after that. 
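// For example (an illustrative sketch): (sext (extract_vector_elt v8i16:$v,
// lane)) to i32 becomes (ARMISD::VGETLANEs $v, lane), which selects to a
// single "vmov.s16 rN, dM[lane]" instead of an extract followed by a
// separate sign extension.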
11123 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 11124 SDValue Vec = N0.getOperand(0); 11125 SDValue Lane = N0.getOperand(1); 11126 EVT VT = N->getValueType(0); 11127 EVT EltVT = N0.getValueType(); 11128 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11129 11130 if (VT == MVT::i32 && 11131 (EltVT == MVT::i8 || EltVT == MVT::i16) && 11132 TLI.isTypeLegal(Vec.getValueType()) && 11133 isa<ConstantSDNode>(Lane)) { 11134 11135 unsigned Opc = 0; 11136 switch (N->getOpcode()) { 11137 default: llvm_unreachable("unexpected opcode"); 11138 case ISD::SIGN_EXTEND: 11139 Opc = ARMISD::VGETLANEs; 11140 break; 11141 case ISD::ZERO_EXTEND: 11142 case ISD::ANY_EXTEND: 11143 Opc = ARMISD::VGETLANEu; 11144 break; 11145 } 11146 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); 11147 } 11148 } 11149 11150 return SDValue(); 11151 } 11152 11153 static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero, 11154 APInt &KnownOne) { 11155 if (Op.getOpcode() == ARMISD::BFI) { 11156 // Conservatively, we can recurse down the first operand 11157 // and just mask out all affected bits. 11158 computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne); 11159 11160 // The operand to BFI is already a mask suitable for removing the bits it 11161 // sets. 11162 ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2)); 11163 const APInt &Mask = CI->getAPIntValue(); 11164 KnownZero &= Mask; 11165 KnownOne &= Mask; 11166 return; 11167 } 11168 if (Op.getOpcode() == ARMISD::CMOV) { 11169 APInt KZ2(KnownZero.getBitWidth(), 0); 11170 APInt KO2(KnownOne.getBitWidth(), 0); 11171 computeKnownBits(DAG, Op.getOperand(1), KnownZero, KnownOne); 11172 computeKnownBits(DAG, Op.getOperand(2), KZ2, KO2); 11173 11174 KnownZero &= KZ2; 11175 KnownOne &= KO2; 11176 return; 11177 } 11178 return DAG.computeKnownBits(Op, KnownZero, KnownOne); 11179 } 11180 11181 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { 11182 // If we have a CMOV, OR and AND combination such as: 11183 // if (x & CN) 11184 // y |= CM; 11185 // 11186 // And: 11187 // * CN is a single bit; 11188 // * All bits covered by CM are known zero in y 11189 // 11190 // Then we can convert this into a sequence of BFI instructions. This will 11191 // always be a win if CM is a single bit, will always be no worse than the 11192 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is 11193 // three bits (due to the extra IT instruction). 11194 11195 SDValue Op0 = CMOV->getOperand(0); 11196 SDValue Op1 = CMOV->getOperand(1); 11197 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2)); 11198 auto CC = CCNode->getAPIntValue().getLimitedValue(); 11199 SDValue CmpZ = CMOV->getOperand(4); 11200 11201 // The compare must be against zero. 11202 if (!isNullConstant(CmpZ->getOperand(1))) 11203 return SDValue(); 11204 11205 assert(CmpZ->getOpcode() == ARMISD::CMPZ); 11206 SDValue And = CmpZ->getOperand(0); 11207 if (And->getOpcode() != ISD::AND) 11208 return SDValue(); 11209 ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1)); 11210 if (!AndC || !AndC->getAPIntValue().isPowerOf2()) 11211 return SDValue(); 11212 SDValue X = And->getOperand(0); 11213 11214 if (CC == ARMCC::EQ) { 11215 // We're performing an "equal to zero" compare. Swap the operands so we 11216 // canonicalize on a "not equal to zero" compare. 
11217 std::swap(Op0, Op1); 11218 } else { 11219 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?"); 11220 } 11221 11222 if (Op1->getOpcode() != ISD::OR) 11223 return SDValue(); 11224 11225 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1)); 11226 if (!OrC) 11227 return SDValue(); 11228 SDValue Y = Op1->getOperand(0); 11229 11230 if (Op0 != Y) 11231 return SDValue(); 11232 11233 // Now, is it profitable to continue? 11234 APInt OrCI = OrC->getAPIntValue(); 11235 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2; 11236 if (OrCI.countPopulation() > Heuristic) 11237 return SDValue(); 11238 11239 // Lastly, can we determine that the bits defined by OrCI 11240 // are zero in Y? 11241 APInt KnownZero, KnownOne; 11242 computeKnownBits(DAG, Y, KnownZero, KnownOne); 11243 if ((OrCI & KnownZero) != OrCI) 11244 return SDValue(); 11245 11246 // OK, we can do the combine. 11247 SDValue V = Y; 11248 SDLoc dl(X); 11249 EVT VT = X.getValueType(); 11250 unsigned BitInX = AndC->getAPIntValue().logBase2(); 11251 11252 if (BitInX != 0) { 11253 // We must shift X first. 11254 X = DAG.getNode(ISD::SRL, dl, VT, X, 11255 DAG.getConstant(BitInX, dl, VT)); 11256 } 11257 11258 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits(); 11259 BitInY < NumActiveBits; ++BitInY) { 11260 if (OrCI[BitInY] == 0) 11261 continue; 11262 APInt Mask(VT.getSizeInBits(), 0); 11263 Mask.setBit(BitInY); 11264 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X, 11265 // Confusingly, the operand is an *inverted* mask. 11266 DAG.getConstant(~Mask, dl, VT)); 11267 } 11268 11269 return V; 11270 } 11271 11272 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND. 11273 SDValue 11274 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { 11275 SDValue Cmp = N->getOperand(4); 11276 if (Cmp.getOpcode() != ARMISD::CMPZ) 11277 // Only looking at NE cases. 11278 return SDValue(); 11279 11280 EVT VT = N->getValueType(0); 11281 SDLoc dl(N); 11282 SDValue LHS = Cmp.getOperand(0); 11283 SDValue RHS = Cmp.getOperand(1); 11284 SDValue Chain = N->getOperand(0); 11285 SDValue BB = N->getOperand(1); 11286 SDValue ARMcc = N->getOperand(2); 11287 ARMCC::CondCodes CC = 11288 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 11289 11290 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0)) 11291 // -> (brcond Chain BB CC CPSR Cmp) 11292 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() && 11293 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV && 11294 LHS->getOperand(0)->hasOneUse()) { 11295 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0)); 11296 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1)); 11297 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 11298 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 11299 if ((LHS00C && LHS00C->getZExtValue() == 0) && 11300 (LHS01C && LHS01C->getZExtValue() == 1) && 11301 (LHS1C && LHS1C->getZExtValue() == 1) && 11302 (RHSC && RHSC->getZExtValue() == 0)) { 11303 return DAG.getNode( 11304 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2), 11305 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4)); 11306 } 11307 } 11308 11309 return SDValue(); 11310 } 11311 11312 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 
11313 SDValue 11314 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 11315 SDValue Cmp = N->getOperand(4); 11316 if (Cmp.getOpcode() != ARMISD::CMPZ) 11317 // Only looking at EQ and NE cases. 11318 return SDValue(); 11319 11320 EVT VT = N->getValueType(0); 11321 SDLoc dl(N); 11322 SDValue LHS = Cmp.getOperand(0); 11323 SDValue RHS = Cmp.getOperand(1); 11324 SDValue FalseVal = N->getOperand(0); 11325 SDValue TrueVal = N->getOperand(1); 11326 SDValue ARMcc = N->getOperand(2); 11327 ARMCC::CondCodes CC = 11328 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 11329 11330 // BFI is only available on V6T2+. 11331 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) { 11332 SDValue R = PerformCMOVToBFICombine(N, DAG); 11333 if (R) 11334 return R; 11335 } 11336 11337 // Simplify 11338 // mov r1, r0 11339 // cmp r1, x 11340 // mov r0, y 11341 // moveq r0, x 11342 // to 11343 // cmp r0, x 11344 // movne r0, y 11345 // 11346 // mov r1, r0 11347 // cmp r1, x 11348 // mov r0, x 11349 // movne r0, y 11350 // to 11351 // cmp r0, x 11352 // movne r0, y 11353 /// FIXME: Turn this into a target neutral optimization? 11354 SDValue Res; 11355 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 11356 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 11357 N->getOperand(3), Cmp); 11358 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 11359 SDValue ARMcc; 11360 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 11361 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 11362 N->getOperand(3), NewCmp); 11363 } 11364 11365 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0)) 11366 // -> (cmov F T CC CPSR Cmp) 11367 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) { 11368 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)); 11369 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); 11370 auto *RHSC = dyn_cast<ConstantSDNode>(RHS); 11371 if ((LHS0C && LHS0C->getZExtValue() == 0) && 11372 (LHS1C && LHS1C->getZExtValue() == 1) && 11373 (RHSC && RHSC->getZExtValue() == 0)) { 11374 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 11375 LHS->getOperand(2), LHS->getOperand(3), 11376 LHS->getOperand(4)); 11377 } 11378 } 11379 11380 if (Res.getNode()) { 11381 APInt KnownZero, KnownOne; 11382 DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne); 11383 // Capture demanded bits information that would be otherwise lost. 
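// For example (sketch): if the CMOV is known to produce only 0 or 1,
// KnownZero is 0xfffffffe, so the result is wrapped in an AssertZext from
// i1 and later zero extensions of it can fold away.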
11384 if (KnownZero == 0xfffffffe) 11385 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 11386 DAG.getValueType(MVT::i1)); 11387 else if (KnownZero == 0xffffff00) 11388 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 11389 DAG.getValueType(MVT::i8)); 11390 else if (KnownZero == 0xffff0000) 11391 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 11392 DAG.getValueType(MVT::i16)); 11393 } 11394 11395 return Res; 11396 } 11397 11398 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 11399 DAGCombinerInfo &DCI) const { 11400 switch (N->getOpcode()) { 11401 default: break; 11402 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); 11403 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 11404 case ISD::SUB: return PerformSUBCombine(N, DCI); 11405 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 11406 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 11407 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 11408 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 11409 case ARMISD::BFI: return PerformBFICombine(N, DCI); 11410 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget); 11411 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 11412 case ISD::STORE: return PerformSTORECombine(N, DCI); 11413 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget); 11414 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 11415 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 11416 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 11417 case ISD::FP_TO_SINT: 11418 case ISD::FP_TO_UINT: 11419 return PerformVCVTCombine(N, DCI.DAG, Subtarget); 11420 case ISD::FDIV: 11421 return PerformVDIVCombine(N, DCI.DAG, Subtarget); 11422 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 11423 case ISD::SHL: 11424 case ISD::SRA: 11425 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 11426 case ISD::SIGN_EXTEND: 11427 case ISD::ZERO_EXTEND: 11428 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 11429 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG); 11430 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); 11431 case ISD::LOAD: return PerformLOADCombine(N, DCI); 11432 case ARMISD::VLD2DUP: 11433 case ARMISD::VLD3DUP: 11434 case ARMISD::VLD4DUP: 11435 return PerformVLDCombine(N, DCI); 11436 case ARMISD::BUILD_VECTOR: 11437 return PerformARMBUILD_VECTORCombine(N, DCI); 11438 case ISD::INTRINSIC_VOID: 11439 case ISD::INTRINSIC_W_CHAIN: 11440 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 11441 case Intrinsic::arm_neon_vld1: 11442 case Intrinsic::arm_neon_vld2: 11443 case Intrinsic::arm_neon_vld3: 11444 case Intrinsic::arm_neon_vld4: 11445 case Intrinsic::arm_neon_vld2lane: 11446 case Intrinsic::arm_neon_vld3lane: 11447 case Intrinsic::arm_neon_vld4lane: 11448 case Intrinsic::arm_neon_vst1: 11449 case Intrinsic::arm_neon_vst2: 11450 case Intrinsic::arm_neon_vst3: 11451 case Intrinsic::arm_neon_vst4: 11452 case Intrinsic::arm_neon_vst2lane: 11453 case Intrinsic::arm_neon_vst3lane: 11454 case Intrinsic::arm_neon_vst4lane: 11455 return PerformVLDCombine(N, DCI); 11456 default: break; 11457 } 11458 break; 11459 } 11460 return SDValue(); 11461 } 11462 11463 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, 11464 EVT VT) const { 11465 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE); 11466 } 11467 11468 bool 
ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
11469 unsigned,
11470 unsigned,
11471 bool *Fast) const {
11472 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
11473 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
11474
11475 switch (VT.getSimpleVT().SimpleTy) {
11476 default:
11477 return false;
11478 case MVT::i8:
11479 case MVT::i16:
11480 case MVT::i32: {
11481 // Unaligned access can use (for example) LDRB, LDRH, LDR.
11482 if (AllowsUnaligned) {
11483 if (Fast)
11484 *Fast = Subtarget->hasV7Ops();
11485 return true;
11486 }
11487 return false;
11488 }
11489 case MVT::f64:
11490 case MVT::v2f64: {
11491 // For any little-endian targets with NEON, we can support unaligned ld/st
11492 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
11493 // A big-endian target may also explicitly support unaligned accesses.
11494 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
11495 if (Fast)
11496 *Fast = true;
11497 return true;
11498 }
11499 return false;
11500 }
11501 }
11502 }
11503
11504 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
11505 unsigned AlignCheck) {
11506 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
11507 (DstAlign == 0 || DstAlign % AlignCheck == 0));
11508 }
11509
11510 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
11511 unsigned DstAlign, unsigned SrcAlign,
11512 bool IsMemset, bool ZeroMemset,
11513 bool MemcpyStrSrc,
11514 MachineFunction &MF) const {
11515 const Function *F = MF.getFunction();
11516
11517 // See if we can use NEON instructions for this...
11518 if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
11519 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
11520 bool Fast;
11521 if (Size >= 16 &&
11522 (memOpAlign(SrcAlign, DstAlign, 16) ||
11523 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
11524 return MVT::v2f64;
11525 } else if (Size >= 8 &&
11526 (memOpAlign(SrcAlign, DstAlign, 8) ||
11527 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
11528 Fast))) {
11529 return MVT::f64;
11530 }
11531 }
11532
11533 // Lower to i32/i16 if the size permits.
11534 if (Size >= 4)
11535 return MVT::i32;
11536 else if (Size >= 2)
11537 return MVT::i16;
11538
11539 // Let the target-independent logic figure it out.
11540 return MVT::Other;
11541 }
11542
11543 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
11544 if (Val.getOpcode() != ISD::LOAD)
11545 return false;
11546
11547 EVT VT1 = Val.getValueType();
11548 if (!VT1.isSimple() || !VT1.isInteger() ||
11549 !VT2.isSimple() || !VT2.isInteger())
11550 return false;
11551
11552 switch (VT1.getSimpleVT().SimpleTy) {
11553 default: break;
11554 case MVT::i1:
11555 case MVT::i8:
11556 case MVT::i16:
11557 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
11558 return true;
11559 }
11560
11561 return false;
11562 }
11563
11564 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
11565 EVT VT = ExtVal.getValueType();
11566
11567 if (!isTypeLegal(VT))
11568 return false;
11569
11570 // Don't create a loadext if we can fold the extension into a wide/long
11571 // instruction.
11572 // If there's more than one user instruction, the loadext is desirable no
11573 // matter what. There can be two uses by the same instruction.
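// For example (an illustrative sketch): in (add (zext (load p)), (zext
// (load q))) the extensions can be folded into a single vaddl.u16, so when
// the only user is one of the opcodes checked below it is better to keep
// the extend separate rather than form an extending load.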
11574 if (ExtVal->use_empty() || 11575 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode())) 11576 return true; 11577 11578 SDNode *U = *ExtVal->use_begin(); 11579 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB || 11580 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL)) 11581 return false; 11582 11583 return true; 11584 } 11585 11586 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 11587 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 11588 return false; 11589 11590 if (!isTypeLegal(EVT::getEVT(Ty1))) 11591 return false; 11592 11593 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); 11594 11595 // Assuming the caller doesn't have a zeroext or signext return parameter, 11596 // truncation all the way down to i1 is valid. 11597 return true; 11598 } 11599 11600 11601 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 11602 if (V < 0) 11603 return false; 11604 11605 unsigned Scale = 1; 11606 switch (VT.getSimpleVT().SimpleTy) { 11607 default: return false; 11608 case MVT::i1: 11609 case MVT::i8: 11610 // Scale == 1; 11611 break; 11612 case MVT::i16: 11613 // Scale == 2; 11614 Scale = 2; 11615 break; 11616 case MVT::i32: 11617 // Scale == 4; 11618 Scale = 4; 11619 break; 11620 } 11621 11622 if ((V & (Scale - 1)) != 0) 11623 return false; 11624 V /= Scale; 11625 return V == (V & ((1LL << 5) - 1)); 11626 } 11627 11628 static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 11629 const ARMSubtarget *Subtarget) { 11630 bool isNeg = false; 11631 if (V < 0) { 11632 isNeg = true; 11633 V = - V; 11634 } 11635 11636 switch (VT.getSimpleVT().SimpleTy) { 11637 default: return false; 11638 case MVT::i1: 11639 case MVT::i8: 11640 case MVT::i16: 11641 case MVT::i32: 11642 // + imm12 or - imm8 11643 if (isNeg) 11644 return V == (V & ((1LL << 8) - 1)); 11645 return V == (V & ((1LL << 12) - 1)); 11646 case MVT::f32: 11647 case MVT::f64: 11648 // Same as ARM mode. FIXME: NEON? 11649 if (!Subtarget->hasVFP2()) 11650 return false; 11651 if ((V & 3) != 0) 11652 return false; 11653 V >>= 2; 11654 return V == (V & ((1LL << 8) - 1)); 11655 } 11656 } 11657 11658 /// isLegalAddressImmediate - Return true if the integer value can be used 11659 /// as the offset of the target addressing mode for load / store of the 11660 /// given type. 11661 static bool isLegalAddressImmediate(int64_t V, EVT VT, 11662 const ARMSubtarget *Subtarget) { 11663 if (V == 0) 11664 return true; 11665 11666 if (!VT.isSimple()) 11667 return false; 11668 11669 if (Subtarget->isThumb1Only()) 11670 return isLegalT1AddressImmediate(V, VT); 11671 else if (Subtarget->isThumb2()) 11672 return isLegalT2AddressImmediate(V, VT, Subtarget); 11673 11674 // ARM mode. 11675 if (V < 0) 11676 V = - V; 11677 switch (VT.getSimpleVT().SimpleTy) { 11678 default: return false; 11679 case MVT::i1: 11680 case MVT::i8: 11681 case MVT::i32: 11682 // +- imm12 11683 return V == (V & ((1LL << 12) - 1)); 11684 case MVT::i16: 11685 // +- imm8 11686 return V == (V & ((1LL << 8) - 1)); 11687 case MVT::f32: 11688 case MVT::f64: 11689 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
11690 return false; 11691 if ((V & 3) != 0) 11692 return false; 11693 V >>= 2; 11694 return V == (V & ((1LL << 8) - 1)); 11695 } 11696 } 11697 11698 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 11699 EVT VT) const { 11700 int Scale = AM.Scale; 11701 if (Scale < 0) 11702 return false; 11703 11704 switch (VT.getSimpleVT().SimpleTy) { 11705 default: return false; 11706 case MVT::i1: 11707 case MVT::i8: 11708 case MVT::i16: 11709 case MVT::i32: 11710 if (Scale == 1) 11711 return true; 11712 // r + r << imm 11713 Scale = Scale & ~1; 11714 return Scale == 2 || Scale == 4 || Scale == 8; 11715 case MVT::i64: 11716 // r + r 11717 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 11718 return true; 11719 return false; 11720 case MVT::isVoid: 11721 // Note, we allow "void" uses (basically, uses that aren't loads or 11722 // stores), because arm allows folding a scale into many arithmetic 11723 // operations. This should be made more precise and revisited later. 11724 11725 // Allow r << imm, but the imm has to be a multiple of two. 11726 if (Scale & 1) return false; 11727 return isPowerOf2_32(Scale); 11728 } 11729 } 11730 11731 /// isLegalAddressingMode - Return true if the addressing mode represented 11732 /// by AM is legal for this target, for a load/store of the specified type. 11733 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL, 11734 const AddrMode &AM, Type *Ty, 11735 unsigned AS) const { 11736 EVT VT = getValueType(DL, Ty, true); 11737 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 11738 return false; 11739 11740 // Can never fold addr of global into load/store. 11741 if (AM.BaseGV) 11742 return false; 11743 11744 switch (AM.Scale) { 11745 case 0: // no scale reg, must be "r+i" or "r", or "i". 11746 break; 11747 case 1: 11748 if (Subtarget->isThumb1Only()) 11749 return false; 11750 LLVM_FALLTHROUGH; 11751 default: 11752 // ARM doesn't support any R+R*scale+imm addr modes. 11753 if (AM.BaseOffs) 11754 return false; 11755 11756 if (!VT.isSimple()) 11757 return false; 11758 11759 if (Subtarget->isThumb2()) 11760 return isLegalT2ScaledAddressingMode(AM, VT); 11761 11762 int Scale = AM.Scale; 11763 switch (VT.getSimpleVT().SimpleTy) { 11764 default: return false; 11765 case MVT::i1: 11766 case MVT::i8: 11767 case MVT::i32: 11768 if (Scale < 0) Scale = -Scale; 11769 if (Scale == 1) 11770 return true; 11771 // r + r << imm 11772 return isPowerOf2_32(Scale & ~1); 11773 case MVT::i16: 11774 case MVT::i64: 11775 // r + r 11776 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 11777 return true; 11778 return false; 11779 11780 case MVT::isVoid: 11781 // Note, we allow "void" uses (basically, uses that aren't loads or 11782 // stores), because arm allows folding a scale into many arithmetic 11783 // operations. This should be made more precise and revisited later. 11784 11785 // Allow r << imm, but the imm has to be a multiple of two. 11786 if (Scale & 1) return false; 11787 return isPowerOf2_32(Scale); 11788 } 11789 } 11790 return true; 11791 } 11792 11793 /// isLegalICmpImmediate - Return true if the specified immediate is legal 11794 /// icmp immediate, that is the target has icmp instructions which can compare 11795 /// a register against the immediate without having to materialize the 11796 /// immediate into a register. 11797 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 11798 // Thumb2 and ARM modes can use cmn for negative immediates. 
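// For example (sketch): comparing a register against -42 can be selected as
// "cmn r0, #42" in ARM or Thumb2 mode, which is why the encodability check
// below is done on std::abs(Imm).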
11799 if (!Subtarget->isThumb()) 11800 return ARM_AM::getSOImmVal(std::abs(Imm)) != -1; 11801 if (Subtarget->isThumb2()) 11802 return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1; 11803 // Thumb1 doesn't have cmn, and only 8-bit immediates. 11804 return Imm >= 0 && Imm <= 255; 11805 } 11806 11807 /// isLegalAddImmediate - Return true if the specified immediate is a legal add 11808 /// *or sub* immediate, that is the target has add or sub instructions which can 11809 /// add a register with the immediate without having to materialize the 11810 /// immediate into a register. 11811 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 11812 // Same encoding for add/sub, just flip the sign. 11813 int64_t AbsImm = std::abs(Imm); 11814 if (!Subtarget->isThumb()) 11815 return ARM_AM::getSOImmVal(AbsImm) != -1; 11816 if (Subtarget->isThumb2()) 11817 return ARM_AM::getT2SOImmVal(AbsImm) != -1; 11818 // Thumb1 only has 8-bit unsigned immediate. 11819 return AbsImm >= 0 && AbsImm <= 255; 11820 } 11821 11822 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 11823 bool isSEXTLoad, SDValue &Base, 11824 SDValue &Offset, bool &isInc, 11825 SelectionDAG &DAG) { 11826 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 11827 return false; 11828 11829 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 11830 // AddressingMode 3 11831 Base = Ptr->getOperand(0); 11832 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 11833 int RHSC = (int)RHS->getZExtValue(); 11834 if (RHSC < 0 && RHSC > -256) { 11835 assert(Ptr->getOpcode() == ISD::ADD); 11836 isInc = false; 11837 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11838 return true; 11839 } 11840 } 11841 isInc = (Ptr->getOpcode() == ISD::ADD); 11842 Offset = Ptr->getOperand(1); 11843 return true; 11844 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 11845 // AddressingMode 2 11846 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 11847 int RHSC = (int)RHS->getZExtValue(); 11848 if (RHSC < 0 && RHSC > -0x1000) { 11849 assert(Ptr->getOpcode() == ISD::ADD); 11850 isInc = false; 11851 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11852 Base = Ptr->getOperand(0); 11853 return true; 11854 } 11855 } 11856 11857 if (Ptr->getOpcode() == ISD::ADD) { 11858 isInc = true; 11859 ARM_AM::ShiftOpc ShOpcVal= 11860 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 11861 if (ShOpcVal != ARM_AM::no_shift) { 11862 Base = Ptr->getOperand(1); 11863 Offset = Ptr->getOperand(0); 11864 } else { 11865 Base = Ptr->getOperand(0); 11866 Offset = Ptr->getOperand(1); 11867 } 11868 return true; 11869 } 11870 11871 isInc = (Ptr->getOpcode() == ISD::ADD); 11872 Base = Ptr->getOperand(0); 11873 Offset = Ptr->getOperand(1); 11874 return true; 11875 } 11876 11877 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 11878 return false; 11879 } 11880 11881 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 11882 bool isSEXTLoad, SDValue &Base, 11883 SDValue &Offset, bool &isInc, 11884 SelectionDAG &DAG) { 11885 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 11886 return false; 11887 11888 Base = Ptr->getOperand(0); 11889 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 11890 int RHSC = (int)RHS->getZExtValue(); 11891 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 
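      // A negative constant offset should only reach here via ISD::ADD, since
      // the DAG canonicalizes "sub x, c" into "add x, -c" for constant c; the
      // assert below relies on that.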
11892 assert(Ptr->getOpcode() == ISD::ADD); 11893 isInc = false; 11894 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11895 return true; 11896 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 11897 isInc = Ptr->getOpcode() == ISD::ADD; 11898 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0)); 11899 return true; 11900 } 11901 } 11902 11903 return false; 11904 } 11905 11906 /// getPreIndexedAddressParts - returns true by value, base pointer and 11907 /// offset pointer and addressing mode by reference if the node's address 11908 /// can be legally represented as pre-indexed load / store address. 11909 bool 11910 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 11911 SDValue &Offset, 11912 ISD::MemIndexedMode &AM, 11913 SelectionDAG &DAG) const { 11914 if (Subtarget->isThumb1Only()) 11915 return false; 11916 11917 EVT VT; 11918 SDValue Ptr; 11919 bool isSEXTLoad = false; 11920 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 11921 Ptr = LD->getBasePtr(); 11922 VT = LD->getMemoryVT(); 11923 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 11924 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 11925 Ptr = ST->getBasePtr(); 11926 VT = ST->getMemoryVT(); 11927 } else 11928 return false; 11929 11930 bool isInc; 11931 bool isLegal = false; 11932 if (Subtarget->isThumb2()) 11933 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 11934 Offset, isInc, DAG); 11935 else 11936 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 11937 Offset, isInc, DAG); 11938 if (!isLegal) 11939 return false; 11940 11941 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 11942 return true; 11943 } 11944 11945 /// getPostIndexedAddressParts - returns true by value, base pointer and 11946 /// offset pointer and addressing mode by reference if this node can be 11947 /// combined with a load / store to form a post-indexed load / store. 11948 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 11949 SDValue &Base, 11950 SDValue &Offset, 11951 ISD::MemIndexedMode &AM, 11952 SelectionDAG &DAG) const { 11953 EVT VT; 11954 SDValue Ptr; 11955 bool isSEXTLoad = false, isNonExt; 11956 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 11957 VT = LD->getMemoryVT(); 11958 Ptr = LD->getBasePtr(); 11959 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 11960 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD; 11961 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 11962 VT = ST->getMemoryVT(); 11963 Ptr = ST->getBasePtr(); 11964 isNonExt = !ST->isTruncatingStore(); 11965 } else 11966 return false; 11967 11968 if (Subtarget->isThumb1Only()) { 11969 // Thumb-1 can do a limited post-inc load or store as an updating LDM. It 11970 // must be non-extending/truncating, i32, with an offset of 4. 
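    // For example, "ldr r0, [r1]" followed by "adds r1, #4" can be folded into
    // the updating form "ldm r1!, {r0}", which is the only post-increment form
    // accepted here.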
11971 assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!"); 11972 if (Op->getOpcode() != ISD::ADD || !isNonExt) 11973 return false; 11974 auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1)); 11975 if (!RHS || RHS->getZExtValue() != 4) 11976 return false; 11977 11978 Offset = Op->getOperand(1); 11979 Base = Op->getOperand(0); 11980 AM = ISD::POST_INC; 11981 return true; 11982 } 11983 11984 bool isInc; 11985 bool isLegal = false; 11986 if (Subtarget->isThumb2()) 11987 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 11988 isInc, DAG); 11989 else 11990 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 11991 isInc, DAG); 11992 if (!isLegal) 11993 return false; 11994 11995 if (Ptr != Base) { 11996 // Swap base ptr and offset to catch more post-index load / store when 11997 // it's legal. In Thumb2 mode, offset must be an immediate. 11998 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 11999 !Subtarget->isThumb2()) 12000 std::swap(Base, Offset); 12001 12002 // Post-indexed load / store update the base pointer. 12003 if (Ptr != Base) 12004 return false; 12005 } 12006 12007 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 12008 return true; 12009 } 12010 12011 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 12012 APInt &KnownZero, 12013 APInt &KnownOne, 12014 const SelectionDAG &DAG, 12015 unsigned Depth) const { 12016 unsigned BitWidth = KnownOne.getBitWidth(); 12017 KnownZero = KnownOne = APInt(BitWidth, 0); 12018 switch (Op.getOpcode()) { 12019 default: break; 12020 case ARMISD::ADDC: 12021 case ARMISD::ADDE: 12022 case ARMISD::SUBC: 12023 case ARMISD::SUBE: 12024 // These nodes' second result is a boolean 12025 if (Op.getResNo() == 0) 12026 break; 12027 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); 12028 break; 12029 case ARMISD::CMOV: { 12030 // Bits are known zero/one if known on the LHS and RHS. 12031 DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 12032 if (KnownZero == 0 && KnownOne == 0) return; 12033 12034 APInt KnownZeroRHS, KnownOneRHS; 12035 DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1); 12036 KnownZero &= KnownZeroRHS; 12037 KnownOne &= KnownOneRHS; 12038 return; 12039 } 12040 case ISD::INTRINSIC_W_CHAIN: { 12041 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1)); 12042 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue()); 12043 switch (IntID) { 12044 default: return; 12045 case Intrinsic::arm_ldaex: 12046 case Intrinsic::arm_ldrex: { 12047 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT(); 12048 unsigned MemBits = VT.getScalarSizeInBits(); 12049 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits); 12050 return; 12051 } 12052 } 12053 } 12054 } 12055 } 12056 12057 //===----------------------------------------------------------------------===// 12058 // ARM Inline Assembly Support 12059 //===----------------------------------------------------------------------===// 12060 12061 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 12062 // Looking for "rev" which is V6+. 
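  // A typical candidate is:  %r = call i32 asm "rev $0, $1", "=l,l"(i32 %x)
  // which is replaced below by the equivalent llvm.bswap.i32 intrinsic so that
  // later passes can reason about it.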
12063 if (!Subtarget->hasV6Ops()) 12064 return false; 12065 12066 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 12067 std::string AsmStr = IA->getAsmString(); 12068 SmallVector<StringRef, 4> AsmPieces; 12069 SplitString(AsmStr, AsmPieces, ";\n"); 12070 12071 switch (AsmPieces.size()) { 12072 default: return false; 12073 case 1: 12074 AsmStr = AsmPieces[0]; 12075 AsmPieces.clear(); 12076 SplitString(AsmStr, AsmPieces, " \t,"); 12077 12078 // rev $0, $1 12079 if (AsmPieces.size() == 3 && 12080 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 12081 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 12082 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 12083 if (Ty && Ty->getBitWidth() == 32) 12084 return IntrinsicLowering::LowerToByteSwap(CI); 12085 } 12086 break; 12087 } 12088 12089 return false; 12090 } 12091 12092 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { 12093 // At this point, we have to lower this constraint to something else, so we 12094 // lower it to an "r" or "w". However, by doing this we will force the result 12095 // to be in register, while the X constraint is much more permissive. 12096 // 12097 // Although we are correct (we are free to emit anything, without 12098 // constraints), we might break use cases that would expect us to be more 12099 // efficient and emit something else. 12100 if (!Subtarget->hasVFP2()) 12101 return "r"; 12102 if (ConstraintVT.isFloatingPoint()) 12103 return "w"; 12104 if (ConstraintVT.isVector() && Subtarget->hasNEON() && 12105 (ConstraintVT.getSizeInBits() == 64 || 12106 ConstraintVT.getSizeInBits() == 128)) 12107 return "w"; 12108 12109 return "r"; 12110 } 12111 12112 /// getConstraintType - Given a constraint letter, return the type of 12113 /// constraint it is for this target. 12114 ARMTargetLowering::ConstraintType 12115 ARMTargetLowering::getConstraintType(StringRef Constraint) const { 12116 if (Constraint.size() == 1) { 12117 switch (Constraint[0]) { 12118 default: break; 12119 case 'l': return C_RegisterClass; 12120 case 'w': return C_RegisterClass; 12121 case 'h': return C_RegisterClass; 12122 case 'x': return C_RegisterClass; 12123 case 't': return C_RegisterClass; 12124 case 'j': return C_Other; // Constant for movw. 12125 // An address with a single base register. Due to the way we 12126 // currently handle addresses it is the same as an 'r' memory constraint. 12127 case 'Q': return C_Memory; 12128 } 12129 } else if (Constraint.size() == 2) { 12130 switch (Constraint[0]) { 12131 default: break; 12132 // All 'U+' constraints are addresses. 12133 case 'U': return C_Memory; 12134 } 12135 } 12136 return TargetLowering::getConstraintType(Constraint); 12137 } 12138 12139 /// Examine constraint type and operand type and determine a weight value. 12140 /// This object must already have been set up with the operand type 12141 /// and the current alternative constraint selected. 12142 TargetLowering::ConstraintWeight 12143 ARMTargetLowering::getSingleConstraintMatchWeight( 12144 AsmOperandInfo &info, const char *constraint) const { 12145 ConstraintWeight weight = CW_Invalid; 12146 Value *CallOperandVal = info.CallOperandVal; 12147 // If we don't have a value, we can't do a match, 12148 // but allow it at the lowest weight. 12149 if (!CallOperandVal) 12150 return CW_Default; 12151 Type *type = CallOperandVal->getType(); 12152 // Look at the constraint type. 
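  // For example, an i32 operand under the 'l' constraint gets CW_SpecificReg
  // on Thumb targets, so the low registers r0-r7 are preferred over the
  // generic register class.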
12153 switch (*constraint) { 12154 default: 12155 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 12156 break; 12157 case 'l': 12158 if (type->isIntegerTy()) { 12159 if (Subtarget->isThumb()) 12160 weight = CW_SpecificReg; 12161 else 12162 weight = CW_Register; 12163 } 12164 break; 12165 case 'w': 12166 if (type->isFloatingPointTy()) 12167 weight = CW_Register; 12168 break; 12169 } 12170 return weight; 12171 } 12172 12173 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 12174 RCPair ARMTargetLowering::getRegForInlineAsmConstraint( 12175 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { 12176 if (Constraint.size() == 1) { 12177 // GCC ARM Constraint Letters 12178 switch (Constraint[0]) { 12179 case 'l': // Low regs or general regs. 12180 if (Subtarget->isThumb()) 12181 return RCPair(0U, &ARM::tGPRRegClass); 12182 return RCPair(0U, &ARM::GPRRegClass); 12183 case 'h': // High regs or no regs. 12184 if (Subtarget->isThumb()) 12185 return RCPair(0U, &ARM::hGPRRegClass); 12186 break; 12187 case 'r': 12188 if (Subtarget->isThumb1Only()) 12189 return RCPair(0U, &ARM::tGPRRegClass); 12190 return RCPair(0U, &ARM::GPRRegClass); 12191 case 'w': 12192 if (VT == MVT::Other) 12193 break; 12194 if (VT == MVT::f32) 12195 return RCPair(0U, &ARM::SPRRegClass); 12196 if (VT.getSizeInBits() == 64) 12197 return RCPair(0U, &ARM::DPRRegClass); 12198 if (VT.getSizeInBits() == 128) 12199 return RCPair(0U, &ARM::QPRRegClass); 12200 break; 12201 case 'x': 12202 if (VT == MVT::Other) 12203 break; 12204 if (VT == MVT::f32) 12205 return RCPair(0U, &ARM::SPR_8RegClass); 12206 if (VT.getSizeInBits() == 64) 12207 return RCPair(0U, &ARM::DPR_8RegClass); 12208 if (VT.getSizeInBits() == 128) 12209 return RCPair(0U, &ARM::QPR_8RegClass); 12210 break; 12211 case 't': 12212 if (VT == MVT::f32) 12213 return RCPair(0U, &ARM::SPRRegClass); 12214 break; 12215 } 12216 } 12217 if (StringRef("{cc}").equals_lower(Constraint)) 12218 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 12219 12220 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 12221 } 12222 12223 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 12224 /// vector. If it is invalid, don't add anything to Ops. 12225 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 12226 std::string &Constraint, 12227 std::vector<SDValue>&Ops, 12228 SelectionDAG &DAG) const { 12229 SDValue Result; 12230 12231 // Currently only support length 1 constraints. 12232 if (Constraint.length() != 1) return; 12233 12234 char ConstraintLetter = Constraint[0]; 12235 switch (ConstraintLetter) { 12236 default: break; 12237 case 'j': 12238 case 'I': case 'J': case 'K': case 'L': 12239 case 'M': case 'N': case 'O': 12240 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 12241 if (!C) 12242 return; 12243 12244 int64_t CVal64 = C->getSExtValue(); 12245 int CVal = (int) CVal64; 12246 // None of these constraints allow values larger than 32 bits. Check 12247 // that the value fits in an int. 12248 if (CVal != CVal64) 12249 return; 12250 12251 switch (ConstraintLetter) { 12252 case 'j': 12253 // Constant suitable for movw, must be between 0 and 12254 // 65535. 12255 if (Subtarget->hasV6T2Ops()) 12256 if (CVal >= 0 && CVal <= 65535) 12257 break; 12258 return; 12259 case 'I': 12260 if (Subtarget->isThumb1Only()) { 12261 // This must be a constant between 0 and 255, for ADD 12262 // immediates. 
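        // e.g. "adds r0, #200" is encodable in Thumb-1, while 300 is rejected
        // here and would have to be materialized separately.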
12263 if (CVal >= 0 && CVal <= 255) 12264 break; 12265 } else if (Subtarget->isThumb2()) { 12266 // A constant that can be used as an immediate value in a 12267 // data-processing instruction. 12268 if (ARM_AM::getT2SOImmVal(CVal) != -1) 12269 break; 12270 } else { 12271 // A constant that can be used as an immediate value in a 12272 // data-processing instruction. 12273 if (ARM_AM::getSOImmVal(CVal) != -1) 12274 break; 12275 } 12276 return; 12277 12278 case 'J': 12279 if (Subtarget->isThumb1Only()) { 12280 // This must be a constant between -255 and -1, for negated ADD 12281 // immediates. This can be used in GCC with an "n" modifier that 12282 // prints the negated value, for use with SUB instructions. It is 12283 // not useful otherwise but is implemented for compatibility. 12284 if (CVal >= -255 && CVal <= -1) 12285 break; 12286 } else { 12287 // This must be a constant between -4095 and 4095. It is not clear 12288 // what this constraint is intended for. Implemented for 12289 // compatibility with GCC. 12290 if (CVal >= -4095 && CVal <= 4095) 12291 break; 12292 } 12293 return; 12294 12295 case 'K': 12296 if (Subtarget->isThumb1Only()) { 12297 // A 32-bit value where only one byte has a nonzero value. Exclude 12298 // zero to match GCC. This constraint is used by GCC internally for 12299 // constants that can be loaded with a move/shift combination. 12300 // It is not useful otherwise but is implemented for compatibility. 12301 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 12302 break; 12303 } else if (Subtarget->isThumb2()) { 12304 // A constant whose bitwise inverse can be used as an immediate 12305 // value in a data-processing instruction. This can be used in GCC 12306 // with a "B" modifier that prints the inverted value, for use with 12307 // BIC and MVN instructions. It is not useful otherwise but is 12308 // implemented for compatibility. 12309 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 12310 break; 12311 } else { 12312 // A constant whose bitwise inverse can be used as an immediate 12313 // value in a data-processing instruction. This can be used in GCC 12314 // with a "B" modifier that prints the inverted value, for use with 12315 // BIC and MVN instructions. It is not useful otherwise but is 12316 // implemented for compatibility. 12317 if (ARM_AM::getSOImmVal(~CVal) != -1) 12318 break; 12319 } 12320 return; 12321 12322 case 'L': 12323 if (Subtarget->isThumb1Only()) { 12324 // This must be a constant between -7 and 7, 12325 // for 3-operand ADD/SUB immediate instructions. 12326 if (CVal >= -7 && CVal < 7) 12327 break; 12328 } else if (Subtarget->isThumb2()) { 12329 // A constant whose negation can be used as an immediate value in a 12330 // data-processing instruction. This can be used in GCC with an "n" 12331 // modifier that prints the negated value, for use with SUB 12332 // instructions. It is not useful otherwise but is implemented for 12333 // compatibility. 12334 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 12335 break; 12336 } else { 12337 // A constant whose negation can be used as an immediate value in a 12338 // data-processing instruction. This can be used in GCC with an "n" 12339 // modifier that prints the negated value, for use with SUB 12340 // instructions. It is not useful otherwise but is implemented for 12341 // compatibility. 12342 if (ARM_AM::getSOImmVal(-CVal) != -1) 12343 break; 12344 } 12345 return; 12346 12347 case 'M': 12348 if (Subtarget->isThumb1Only()) { 12349 // This must be a multiple of 4 between 0 and 1020, for 12350 // ADD sp + immediate. 
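        // e.g. 1020 is accepted (a multiple of 4, in range), while 1022 fails
        // the alignment check and 1024 is out of range.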
12351 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 12352 break; 12353 } else { 12354 // A power of two or a constant between 0 and 32. This is used in 12355 // GCC for the shift amount on shifted register operands, but it is 12356 // useful in general for any shift amounts. 12357 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 12358 break; 12359 } 12360 return; 12361 12362 case 'N': 12363 if (Subtarget->isThumb()) { // FIXME thumb2 12364 // This must be a constant between 0 and 31, for shift amounts. 12365 if (CVal >= 0 && CVal <= 31) 12366 break; 12367 } 12368 return; 12369 12370 case 'O': 12371 if (Subtarget->isThumb()) { // FIXME thumb2 12372 // This must be a multiple of 4 between -508 and 508, for 12373 // ADD/SUB sp = sp + immediate. 12374 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 12375 break; 12376 } 12377 return; 12378 } 12379 Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType()); 12380 break; 12381 } 12382 12383 if (Result.getNode()) { 12384 Ops.push_back(Result); 12385 return; 12386 } 12387 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 12388 } 12389 12390 static RTLIB::Libcall getDivRemLibcall( 12391 const SDNode *N, MVT::SimpleValueType SVT) { 12392 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || 12393 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && 12394 "Unhandled Opcode in getDivRemLibcall"); 12395 bool isSigned = N->getOpcode() == ISD::SDIVREM || 12396 N->getOpcode() == ISD::SREM; 12397 RTLIB::Libcall LC; 12398 switch (SVT) { 12399 default: llvm_unreachable("Unexpected request for libcall!"); 12400 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 12401 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 12402 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 12403 case MVT::i64: LC = isSigned ? 
RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 12404 } 12405 return LC; 12406 } 12407 12408 static TargetLowering::ArgListTy getDivRemArgList( 12409 const SDNode *N, LLVMContext *Context) { 12410 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM || 12411 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) && 12412 "Unhandled Opcode in getDivRemArgList"); 12413 bool isSigned = N->getOpcode() == ISD::SDIVREM || 12414 N->getOpcode() == ISD::SREM; 12415 TargetLowering::ArgListTy Args; 12416 TargetLowering::ArgListEntry Entry; 12417 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 12418 EVT ArgVT = N->getOperand(i).getValueType(); 12419 Type *ArgTy = ArgVT.getTypeForEVT(*Context); 12420 Entry.Node = N->getOperand(i); 12421 Entry.Ty = ArgTy; 12422 Entry.isSExt = isSigned; 12423 Entry.isZExt = !isSigned; 12424 Args.push_back(Entry); 12425 } 12426 return Args; 12427 } 12428 12429 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { 12430 assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() || 12431 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI()) && 12432 "Register-based DivRem lowering only"); 12433 unsigned Opcode = Op->getOpcode(); 12434 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) && 12435 "Invalid opcode for Div/Rem lowering"); 12436 bool isSigned = (Opcode == ISD::SDIVREM); 12437 EVT VT = Op->getValueType(0); 12438 Type *Ty = VT.getTypeForEVT(*DAG.getContext()); 12439 12440 RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(), 12441 VT.getSimpleVT().SimpleTy); 12442 SDValue InChain = DAG.getEntryNode(); 12443 12444 TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(), 12445 DAG.getContext()); 12446 12447 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 12448 getPointerTy(DAG.getDataLayout())); 12449 12450 Type *RetTy = (Type*)StructType::get(Ty, Ty, nullptr); 12451 12452 SDLoc dl(Op); 12453 TargetLowering::CallLoweringInfo CLI(DAG); 12454 CLI.setDebugLoc(dl).setChain(InChain) 12455 .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)) 12456 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned); 12457 12458 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI); 12459 return CallInfo.first; 12460 } 12461 12462 // Lowers REM using divmod helpers 12463 // see RTABI section 4.2/4.3 12464 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { 12465 // Build return types (div and rem) 12466 std::vector<Type*> RetTyParams; 12467 Type *RetTyElement; 12468 12469 switch (N->getValueType(0).getSimpleVT().SimpleTy) { 12470 default: llvm_unreachable("Unexpected request for libcall!"); 12471 case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break; 12472 case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break; 12473 case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break; 12474 case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break; 12475 } 12476 12477 RetTyParams.push_back(RetTyElement); 12478 RetTyParams.push_back(RetTyElement); 12479 ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams); 12480 Type *RetTy = StructType::get(*DAG.getContext(), ret); 12481 12482 RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT(). 
12483 SimpleTy); 12484 SDValue InChain = DAG.getEntryNode(); 12485 TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext()); 12486 bool isSigned = N->getOpcode() == ISD::SREM; 12487 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), 12488 getPointerTy(DAG.getDataLayout())); 12489 12490 // Lower call 12491 CallLoweringInfo CLI(DAG); 12492 CLI.setChain(InChain) 12493 .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args)) 12494 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N)); 12495 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 12496 12497 // Return second (rem) result operand (first contains div) 12498 SDNode *ResNode = CallResult.first.getNode(); 12499 assert(ResNode->getNumOperands() == 2 && "divmod should return two operands"); 12500 return ResNode->getOperand(1); 12501 } 12502 12503 SDValue 12504 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 12505 assert(Subtarget->isTargetWindows() && "unsupported target platform"); 12506 SDLoc DL(Op); 12507 12508 // Get the inputs. 12509 SDValue Chain = Op.getOperand(0); 12510 SDValue Size = Op.getOperand(1); 12511 12512 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size, 12513 DAG.getConstant(2, DL, MVT::i32)); 12514 12515 SDValue Flag; 12516 Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag); 12517 Flag = Chain.getValue(1); 12518 12519 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 12520 Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag); 12521 12522 SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32); 12523 Chain = NewSP.getValue(1); 12524 12525 SDValue Ops[2] = { NewSP, Chain }; 12526 return DAG.getMergeValues(Ops, DL); 12527 } 12528 12529 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 12530 assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() && 12531 "Unexpected type for custom-lowering FP_EXTEND"); 12532 12533 RTLIB::Libcall LC; 12534 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); 12535 12536 SDValue SrcVal = Op.getOperand(0); 12537 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, 12538 SDLoc(Op)).first; 12539 } 12540 12541 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 12542 assert(Op.getOperand(0).getValueType() == MVT::f64 && 12543 Subtarget->isFPOnlySP() && 12544 "Unexpected type for custom-lowering FP_ROUND"); 12545 12546 RTLIB::Libcall LC; 12547 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); 12548 12549 SDValue SrcVal = Op.getOperand(0); 12550 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, 12551 SDLoc(Op)).first; 12552 } 12553 12554 bool 12555 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 12556 // The ARM target isn't yet aware of offsets. 12557 return false; 12558 } 12559 12560 bool ARM::isBitFieldInvertedMask(unsigned v) { 12561 if (v == 0xffffffff) 12562 return false; 12563 12564 // there can be 1's on either or both "outsides", all the "inside" 12565 // bits must be 0's 12566 return isShiftedMask_32(~v); 12567 } 12568 12569 /// isFPImmLegal - Returns true if the target can instruction select the 12570 /// specified FP immediate natively. If false, the legalizer will 12571 /// materialize the FP immediate as a load from a constant pool. 
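/// For example, with VFP3 a value such as +2.0 can be materialized directly
/// with "vmov.f32 s0, #2.0", whereas a value like 0.1 is not representable in
/// the 8-bit VFP immediate format and is loaded from a constant pool instead.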
12572 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 12573 if (!Subtarget->hasVFP3()) 12574 return false; 12575 if (VT == MVT::f32) 12576 return ARM_AM::getFP32Imm(Imm) != -1; 12577 if (VT == MVT::f64 && !Subtarget->isFPOnlySP()) 12578 return ARM_AM::getFP64Imm(Imm) != -1; 12579 return false; 12580 } 12581 12582 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 12583 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 12584 /// specified in the intrinsic calls. 12585 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 12586 const CallInst &I, 12587 unsigned Intrinsic) const { 12588 switch (Intrinsic) { 12589 case Intrinsic::arm_neon_vld1: 12590 case Intrinsic::arm_neon_vld2: 12591 case Intrinsic::arm_neon_vld3: 12592 case Intrinsic::arm_neon_vld4: 12593 case Intrinsic::arm_neon_vld2lane: 12594 case Intrinsic::arm_neon_vld3lane: 12595 case Intrinsic::arm_neon_vld4lane: { 12596 Info.opc = ISD::INTRINSIC_W_CHAIN; 12597 // Conservatively set memVT to the entire set of vectors loaded. 12598 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12599 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; 12600 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 12601 Info.ptrVal = I.getArgOperand(0); 12602 Info.offset = 0; 12603 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 12604 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 12605 Info.vol = false; // volatile loads with NEON intrinsics not supported 12606 Info.readMem = true; 12607 Info.writeMem = false; 12608 return true; 12609 } 12610 case Intrinsic::arm_neon_vst1: 12611 case Intrinsic::arm_neon_vst2: 12612 case Intrinsic::arm_neon_vst3: 12613 case Intrinsic::arm_neon_vst4: 12614 case Intrinsic::arm_neon_vst2lane: 12615 case Intrinsic::arm_neon_vst3lane: 12616 case Intrinsic::arm_neon_vst4lane: { 12617 Info.opc = ISD::INTRINSIC_VOID; 12618 // Conservatively set memVT to the entire set of vectors stored. 
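    // e.g. for a vst3 of three <4 x i32> operands, the loop below counts
    // 3 * 128 bits and records memVT as v6i64.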
12619 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12620 unsigned NumElts = 0; 12621 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 12622 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 12623 if (!ArgTy->isVectorTy()) 12624 break; 12625 NumElts += DL.getTypeSizeInBits(ArgTy) / 64; 12626 } 12627 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 12628 Info.ptrVal = I.getArgOperand(0); 12629 Info.offset = 0; 12630 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 12631 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 12632 Info.vol = false; // volatile stores with NEON intrinsics not supported 12633 Info.readMem = false; 12634 Info.writeMem = true; 12635 return true; 12636 } 12637 case Intrinsic::arm_ldaex: 12638 case Intrinsic::arm_ldrex: { 12639 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12640 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 12641 Info.opc = ISD::INTRINSIC_W_CHAIN; 12642 Info.memVT = MVT::getVT(PtrTy->getElementType()); 12643 Info.ptrVal = I.getArgOperand(0); 12644 Info.offset = 0; 12645 Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); 12646 Info.vol = true; 12647 Info.readMem = true; 12648 Info.writeMem = false; 12649 return true; 12650 } 12651 case Intrinsic::arm_stlex: 12652 case Intrinsic::arm_strex: { 12653 auto &DL = I.getCalledFunction()->getParent()->getDataLayout(); 12654 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); 12655 Info.opc = ISD::INTRINSIC_W_CHAIN; 12656 Info.memVT = MVT::getVT(PtrTy->getElementType()); 12657 Info.ptrVal = I.getArgOperand(1); 12658 Info.offset = 0; 12659 Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); 12660 Info.vol = true; 12661 Info.readMem = false; 12662 Info.writeMem = true; 12663 return true; 12664 } 12665 case Intrinsic::arm_stlexd: 12666 case Intrinsic::arm_strexd: { 12667 Info.opc = ISD::INTRINSIC_W_CHAIN; 12668 Info.memVT = MVT::i64; 12669 Info.ptrVal = I.getArgOperand(2); 12670 Info.offset = 0; 12671 Info.align = 8; 12672 Info.vol = true; 12673 Info.readMem = false; 12674 Info.writeMem = true; 12675 return true; 12676 } 12677 case Intrinsic::arm_ldaexd: 12678 case Intrinsic::arm_ldrexd: { 12679 Info.opc = ISD::INTRINSIC_W_CHAIN; 12680 Info.memVT = MVT::i64; 12681 Info.ptrVal = I.getArgOperand(0); 12682 Info.offset = 0; 12683 Info.align = 8; 12684 Info.vol = true; 12685 Info.readMem = true; 12686 Info.writeMem = false; 12687 return true; 12688 } 12689 default: 12690 break; 12691 } 12692 12693 return false; 12694 } 12695 12696 /// \brief Returns true if it is beneficial to convert a load of a constant 12697 /// to just the constant itself. 12698 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 12699 Type *Ty) const { 12700 assert(Ty->isIntegerTy()); 12701 12702 unsigned Bits = Ty->getPrimitiveSizeInBits(); 12703 if (Bits == 0 || Bits > 32) 12704 return false; 12705 return true; 12706 } 12707 12708 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder, 12709 ARM_MB::MemBOpt Domain) const { 12710 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12711 12712 // First, if the target has no DMB, see what fallback we can use. 12713 if (!Subtarget->hasDataBarrier()) { 12714 // Some ARMv6 cpus can support data barriers with an mcr instruction. 12715 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 12716 // here. 
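    // The mcr form used below is the ARMv6 CP15 data memory barrier:
    //   mcr p15, 0, <Rt>, c7, c10, 5
    // where the register value written is required to be zero.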
12717 if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) { 12718 Function *MCR = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_mcr); 12719 Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0), 12720 Builder.getInt32(0), Builder.getInt32(7), 12721 Builder.getInt32(10), Builder.getInt32(5)}; 12722 return Builder.CreateCall(MCR, args); 12723 } else { 12724 // Instead of using barriers, atomic accesses on these subtargets use 12725 // libcalls. 12726 llvm_unreachable("makeDMB on a target so old that it has no barriers"); 12727 } 12728 } else { 12729 Function *DMB = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_dmb); 12730 // Only a full system barrier exists in the M-class architectures. 12731 Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain; 12732 Constant *CDomain = Builder.getInt32(Domain); 12733 return Builder.CreateCall(DMB, CDomain); 12734 } 12735 } 12736 12737 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html 12738 Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 12739 AtomicOrdering Ord, bool IsStore, 12740 bool IsLoad) const { 12741 switch (Ord) { 12742 case AtomicOrdering::NotAtomic: 12743 case AtomicOrdering::Unordered: 12744 llvm_unreachable("Invalid fence: unordered/non-atomic"); 12745 case AtomicOrdering::Monotonic: 12746 case AtomicOrdering::Acquire: 12747 return nullptr; // Nothing to do 12748 case AtomicOrdering::SequentiallyConsistent: 12749 if (!IsStore) 12750 return nullptr; // Nothing to do 12751 /*FALLTHROUGH*/ 12752 case AtomicOrdering::Release: 12753 case AtomicOrdering::AcquireRelease: 12754 if (Subtarget->preferISHSTBarriers()) 12755 return makeDMB(Builder, ARM_MB::ISHST); 12756 // FIXME: add a comment with a link to documentation justifying this. 12757 else 12758 return makeDMB(Builder, ARM_MB::ISH); 12759 } 12760 llvm_unreachable("Unknown fence ordering in emitLeadingFence"); 12761 } 12762 12763 Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 12764 AtomicOrdering Ord, bool IsStore, 12765 bool IsLoad) const { 12766 switch (Ord) { 12767 case AtomicOrdering::NotAtomic: 12768 case AtomicOrdering::Unordered: 12769 llvm_unreachable("Invalid fence: unordered/not-atomic"); 12770 case AtomicOrdering::Monotonic: 12771 case AtomicOrdering::Release: 12772 return nullptr; // Nothing to do 12773 case AtomicOrdering::Acquire: 12774 case AtomicOrdering::AcquireRelease: 12775 case AtomicOrdering::SequentiallyConsistent: 12776 return makeDMB(Builder, ARM_MB::ISH); 12777 } 12778 llvm_unreachable("Unknown fence ordering in emitTrailingFence"); 12779 } 12780 12781 // Loads and stores less than 64-bits are already atomic; ones above that 12782 // are doomed anyway, so defer to the default libcall and blame the OS when 12783 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit 12784 // anything for those. 12785 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { 12786 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); 12787 return (Size == 64) && !Subtarget->isMClass(); 12788 } 12789 12790 // Loads and stores less than 64-bits are already atomic; ones above that 12791 // are doomed anyway, so defer to the default libcall and blame the OS when 12792 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit 12793 // anything for those. 12794 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. 
A15 has that 12795 // guarantee, see DDI0406C ARM architecture reference manual, 12796 // sections A8.8.72-74 LDRD) 12797 TargetLowering::AtomicExpansionKind 12798 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { 12799 unsigned Size = LI->getType()->getPrimitiveSizeInBits(); 12800 return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly 12801 : AtomicExpansionKind::None; 12802 } 12803 12804 // For the real atomic operations, we have ldrex/strex up to 32 bits, 12805 // and up to 64 bits on the non-M profiles 12806 TargetLowering::AtomicExpansionKind 12807 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 12808 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 12809 return (Size <= (Subtarget->isMClass() ? 32U : 64U)) 12810 ? AtomicExpansionKind::LLSC 12811 : AtomicExpansionKind::None; 12812 } 12813 12814 bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR( 12815 AtomicCmpXchgInst *AI) const { 12816 // At -O0, fast-regalloc cannot cope with the live vregs necessary to 12817 // implement cmpxchg without spilling. If the address being exchanged is also 12818 // on the stack and close enough to the spill slot, this can lead to a 12819 // situation where the monitor always gets cleared and the atomic operation 12820 // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead. 12821 return getTargetMachine().getOptLevel() != 0; 12822 } 12823 12824 bool ARMTargetLowering::shouldInsertFencesForAtomic( 12825 const Instruction *I) const { 12826 return InsertFencesForAtomic; 12827 } 12828 12829 // This has so far only been implemented for MachO. 12830 bool ARMTargetLowering::useLoadStackGuardNode() const { 12831 return Subtarget->isTargetMachO(); 12832 } 12833 12834 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, 12835 unsigned &Cost) const { 12836 // If we do not have NEON, vector types are not natively supported. 12837 if (!Subtarget->hasNEON()) 12838 return false; 12839 12840 // Floating point values and vector values map to the same register file. 12841 // Therefore, although we could do a store extract of a vector type, this is 12842 // better to leave at float as we have more freedom in the addressing mode for 12843 // those. 12844 if (VectorTy->isFPOrFPVectorTy()) 12845 return false; 12846 12847 // If the index is unknown at compile time, this is very expensive to lower 12848 // and it is not possible to combine the store with the extract. 12849 if (!isa<ConstantInt>(Idx)) 12850 return false; 12851 12852 assert(VectorTy->isVectorTy() && "VectorTy is not a vector type"); 12853 unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth(); 12854 // We can do a store + vector extract on any vector that fits perfectly in a D 12855 // or Q register. 
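  // For instance, storing lane 1 of a <4 x i32> can then be selected as a
  // single "vst1.32 {d0[1]}, [r0]" rather than a vmov to a core register
  // followed by a str.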
12856 if (BitWidth == 64 || BitWidth == 128) { 12857 Cost = 0; 12858 return true; 12859 } 12860 return false; 12861 } 12862 12863 bool ARMTargetLowering::isCheapToSpeculateCttz() const { 12864 return Subtarget->hasV6T2Ops(); 12865 } 12866 12867 bool ARMTargetLowering::isCheapToSpeculateCtlz() const { 12868 return Subtarget->hasV6T2Ops(); 12869 } 12870 12871 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, 12872 AtomicOrdering Ord) const { 12873 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12874 Type *ValTy = cast<PointerType>(Addr->getType())->getElementType(); 12875 bool IsAcquire = isAcquireOrStronger(Ord); 12876 12877 // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd 12878 // intrinsic must return {i32, i32} and we have to recombine them into a 12879 // single i64 here. 12880 if (ValTy->getPrimitiveSizeInBits() == 64) { 12881 Intrinsic::ID Int = 12882 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd; 12883 Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int); 12884 12885 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); 12886 Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi"); 12887 12888 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); 12889 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi"); 12890 if (!Subtarget->isLittle()) 12891 std::swap (Lo, Hi); 12892 Lo = Builder.CreateZExt(Lo, ValTy, "lo64"); 12893 Hi = Builder.CreateZExt(Hi, ValTy, "hi64"); 12894 return Builder.CreateOr( 12895 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64"); 12896 } 12897 12898 Type *Tys[] = { Addr->getType() }; 12899 Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex; 12900 Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys); 12901 12902 return Builder.CreateTruncOrBitCast( 12903 Builder.CreateCall(Ldrex, Addr), 12904 cast<PointerType>(Addr->getType())->getElementType()); 12905 } 12906 12907 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance( 12908 IRBuilder<> &Builder) const { 12909 if (!Subtarget->hasV7Ops()) 12910 return; 12911 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12912 Builder.CreateCall(llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_clrex)); 12913 } 12914 12915 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, 12916 Value *Addr, 12917 AtomicOrdering Ord) const { 12918 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 12919 bool IsRelease = isReleaseOrStronger(Ord); 12920 12921 // Since the intrinsics must have legal type, the i64 intrinsics take two 12922 // parameters: "i32, i32". We must marshal Val into the appropriate form 12923 // before the call. 12924 if (Val->getType()->getPrimitiveSizeInBits() == 64) { 12925 Intrinsic::ID Int = 12926 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd; 12927 Function *Strex = Intrinsic::getDeclaration(M, Int); 12928 Type *Int32Ty = Type::getInt32Ty(M->getContext()); 12929 12930 Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo"); 12931 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi"); 12932 if (!Subtarget->isLittle()) 12933 std::swap (Lo, Hi); 12934 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); 12935 return Builder.CreateCall(Strex, {Lo, Hi, Addr}); 12936 } 12937 12938 Intrinsic::ID Int = IsRelease ? 
Intrinsic::arm_stlex : Intrinsic::arm_strex; 12939 Type *Tys[] = { Addr->getType() }; 12940 Function *Strex = Intrinsic::getDeclaration(M, Int, Tys); 12941 12942 return Builder.CreateCall( 12943 Strex, {Builder.CreateZExtOrBitCast( 12944 Val, Strex->getFunctionType()->getParamType(0)), 12945 Addr}); 12946 } 12947 12948 /// \brief Lower an interleaved load into a vldN intrinsic. 12949 /// 12950 /// E.g. Lower an interleaved load (Factor = 2): 12951 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4 12952 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements 12953 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements 12954 /// 12955 /// Into: 12956 /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4) 12957 /// %vec0 = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0 12958 /// %vec1 = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1 12959 bool ARMTargetLowering::lowerInterleavedLoad( 12960 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, 12961 ArrayRef<unsigned> Indices, unsigned Factor) const { 12962 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 12963 "Invalid interleave factor"); 12964 assert(!Shuffles.empty() && "Empty shufflevector input"); 12965 assert(Shuffles.size() == Indices.size() && 12966 "Unmatched number of shufflevectors and indices"); 12967 12968 VectorType *VecTy = Shuffles[0]->getType(); 12969 Type *EltTy = VecTy->getVectorElementType(); 12970 12971 const DataLayout &DL = LI->getModule()->getDataLayout(); 12972 unsigned VecSize = DL.getTypeSizeInBits(VecTy); 12973 bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64; 12974 12975 // Skip if we do not have NEON and skip illegal vector types and vector types 12976 // with i64/f64 elements (vldN doesn't support i64/f64 elements). 12977 if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128) || EltIs64Bits) 12978 return false; 12979 12980 // A pointer vector cannot be the return type of the ldN intrinsics. Need to 12981 // load integer vectors first and then convert to pointer vectors. 12982 if (EltTy->isPointerTy()) 12983 VecTy = 12984 VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); 12985 12986 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2, 12987 Intrinsic::arm_neon_vld3, 12988 Intrinsic::arm_neon_vld4}; 12989 12990 IRBuilder<> Builder(LI); 12991 SmallVector<Value *, 2> Ops; 12992 12993 Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace()); 12994 Ops.push_back(Builder.CreateBitCast(LI->getPointerOperand(), Int8Ptr)); 12995 Ops.push_back(Builder.getInt32(LI->getAlignment())); 12996 12997 Type *Tys[] = { VecTy, Int8Ptr }; 12998 Function *VldnFunc = 12999 Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); 13000 CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN"); 13001 13002 // Replace uses of each shufflevector with the corresponding vector loaded 13003 // by ldN. 13004 for (unsigned i = 0; i < Shuffles.size(); i++) { 13005 ShuffleVectorInst *SV = Shuffles[i]; 13006 unsigned Index = Indices[i]; 13007 13008 Value *SubVec = Builder.CreateExtractValue(VldN, Index); 13009 13010 // Convert the integer vector to pointer vector if the element is pointer. 13011 if (EltTy->isPointerTy()) 13012 SubVec = Builder.CreateIntToPtr(SubVec, SV->getType()); 13013 13014 SV->replaceAllUsesWith(SubVec); 13015 } 13016 13017 return true; 13018 } 13019 13020 /// \brief Get a mask consisting of sequential integers starting from \p Start. 13021 /// 13022 /// I.e.
<Start, Start + 1, ..., Start + NumElts - 1> 13023 static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start, 13024 unsigned NumElts) { 13025 SmallVector<Constant *, 16> Mask; 13026 for (unsigned i = 0; i < NumElts; i++) 13027 Mask.push_back(Builder.getInt32(Start + i)); 13028 13029 return ConstantVector::get(Mask); 13030 } 13031 13032 /// \brief Lower an interleaved store into a vstN intrinsic. 13033 /// 13034 /// E.g. Lower an interleaved store (Factor = 3): 13035 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, 13036 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> 13037 /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4 13038 /// 13039 /// Into: 13040 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> 13041 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> 13042 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> 13043 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4) 13044 /// 13045 /// Note that the new shufflevectors will be removed and we'll only generate one 13046 /// vst3 instruction in CodeGen. 13047 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI, 13048 ShuffleVectorInst *SVI, 13049 unsigned Factor) const { 13050 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 13051 "Invalid interleave factor"); 13052 13053 VectorType *VecTy = SVI->getType(); 13054 assert(VecTy->getVectorNumElements() % Factor == 0 && 13055 "Invalid interleaved store"); 13056 13057 unsigned NumSubElts = VecTy->getVectorNumElements() / Factor; 13058 Type *EltTy = VecTy->getVectorElementType(); 13059 VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts); 13060 13061 const DataLayout &DL = SI->getModule()->getDataLayout(); 13062 unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy); 13063 bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64; 13064 13065 // Skip if we do not have NEON and skip illegal vector types and vector types 13066 // with i64/f64 elements (vstN doesn't support i64/f64 elements). 13067 if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128) || 13068 EltIs64Bits) 13069 return false; 13070 13071 Value *Op0 = SVI->getOperand(0); 13072 Value *Op1 = SVI->getOperand(1); 13073 IRBuilder<> Builder(SI); 13074 13075 // StN intrinsics don't support pointer vectors as arguments. Convert pointer 13076 // vectors to integer vectors. 13077 if (EltTy->isPointerTy()) { 13078 Type *IntTy = DL.getIntPtrType(EltTy); 13079 13080 // Convert to the corresponding integer vector. 13081 Type *IntVecTy = 13082 VectorType::get(IntTy, Op0->getType()->getVectorNumElements()); 13083 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); 13084 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); 13085 13086 SubVecTy = VectorType::get(IntTy, NumSubElts); 13087 } 13088 13089 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2, 13090 Intrinsic::arm_neon_vst3, 13091 Intrinsic::arm_neon_vst4}; 13092 SmallVector<Value *, 6> Ops; 13093 13094 Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace()); 13095 Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), Int8Ptr)); 13096 13097 Type *Tys[] = { Int8Ptr, SubVecTy }; 13098 Function *VstNFunc = Intrinsic::getDeclaration( 13099 SI->getModule(), StoreInts[Factor - 2], Tys); 13100 13101 // Split the shufflevector operands into sub vectors for the new vstN call. 
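  // With Factor == 3 and NumSubElts == 4 this produces the masks <0,1,2,3>,
  // <4,5,6,7> and <8,9,10,11>, i.e. the %sub.v0/%sub.v1/%sub.v2 shuffles shown
  // in the function comment above.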
13102 for (unsigned i = 0; i < Factor; i++) 13103 Ops.push_back(Builder.CreateShuffleVector( 13104 Op0, Op1, getSequentialMask(Builder, NumSubElts * i, NumSubElts))); 13105 13106 Ops.push_back(Builder.getInt32(SI->getAlignment())); 13107 Builder.CreateCall(VstNFunc, Ops); 13108 return true; 13109 } 13110 13111 enum HABaseType { 13112 HA_UNKNOWN = 0, 13113 HA_FLOAT, 13114 HA_DOUBLE, 13115 HA_VECT64, 13116 HA_VECT128 13117 }; 13118 13119 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, 13120 uint64_t &Members) { 13121 if (auto *ST = dyn_cast<StructType>(Ty)) { 13122 for (unsigned i = 0; i < ST->getNumElements(); ++i) { 13123 uint64_t SubMembers = 0; 13124 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers)) 13125 return false; 13126 Members += SubMembers; 13127 } 13128 } else if (auto *AT = dyn_cast<ArrayType>(Ty)) { 13129 uint64_t SubMembers = 0; 13130 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers)) 13131 return false; 13132 Members += SubMembers * AT->getNumElements(); 13133 } else if (Ty->isFloatTy()) { 13134 if (Base != HA_UNKNOWN && Base != HA_FLOAT) 13135 return false; 13136 Members = 1; 13137 Base = HA_FLOAT; 13138 } else if (Ty->isDoubleTy()) { 13139 if (Base != HA_UNKNOWN && Base != HA_DOUBLE) 13140 return false; 13141 Members = 1; 13142 Base = HA_DOUBLE; 13143 } else if (auto *VT = dyn_cast<VectorType>(Ty)) { 13144 Members = 1; 13145 switch (Base) { 13146 case HA_FLOAT: 13147 case HA_DOUBLE: 13148 return false; 13149 case HA_VECT64: 13150 return VT->getBitWidth() == 64; 13151 case HA_VECT128: 13152 return VT->getBitWidth() == 128; 13153 case HA_UNKNOWN: 13154 switch (VT->getBitWidth()) { 13155 case 64: 13156 Base = HA_VECT64; 13157 return true; 13158 case 128: 13159 Base = HA_VECT128; 13160 return true; 13161 default: 13162 return false; 13163 } 13164 } 13165 } 13166 13167 return (Members > 0 && Members <= 4); 13168 } 13169 13170 /// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of 13171 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when 13172 /// passing according to AAPCS rules. 13173 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters( 13174 Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { 13175 if (getEffectiveCallingConv(CallConv, isVarArg) != 13176 CallingConv::ARM_AAPCS_VFP) 13177 return false; 13178 13179 HABaseType Base = HA_UNKNOWN; 13180 uint64_t Members = 0; 13181 bool IsHA = isHomogeneousAggregate(Ty, Base, Members); 13182 DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump()); 13183 13184 bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy(); 13185 return IsHA || IsIntArray; 13186 } 13187 13188 unsigned ARMTargetLowering::getExceptionPointerRegister( 13189 const Constant *PersonalityFn) const { 13190 // Platforms which do not use SjLj EH may return values in these registers 13191 // via the personality function. 13192 return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0; 13193 } 13194 13195 unsigned ARMTargetLowering::getExceptionSelectorRegister( 13196 const Constant *PersonalityFn) const { 13197 // Platforms which do not use SjLj EH may return values in these registers 13198 // via the personality function. 13199 return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1; 13200 } 13201 13202 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 13203 // Update IsSplitCSR in ARMFunctionInfo. 
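  // The flag recorded here is what later lets insertCopiesSplitCSR (below)
  // preserve the callee-saved registers through virtual-register copies
  // instead of the usual spill/restore sequence, as used for CXX_FAST_TLS.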
13204 ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>(); 13205 AFI->setIsSplitCSR(true); 13206 } 13207 13208 void ARMTargetLowering::insertCopiesSplitCSR( 13209 MachineBasicBlock *Entry, 13210 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 13211 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo(); 13212 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 13213 if (!IStart) 13214 return; 13215 13216 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 13217 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 13218 MachineBasicBlock::iterator MBBI = Entry->begin(); 13219 for (const MCPhysReg *I = IStart; *I; ++I) { 13220 const TargetRegisterClass *RC = nullptr; 13221 if (ARM::GPRRegClass.contains(*I)) 13222 RC = &ARM::GPRRegClass; 13223 else if (ARM::DPRRegClass.contains(*I)) 13224 RC = &ARM::DPRRegClass; 13225 else 13226 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 13227 13228 unsigned NewVR = MRI->createVirtualRegister(RC); 13229 // Create copy from CSR to a virtual register. 13230 // FIXME: this currently does not emit CFI pseudo-instructions, it works 13231 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 13232 // nounwind. If we want to generalize this later, we may need to emit 13233 // CFI pseudo-instructions. 13234 assert(Entry->getParent()->getFunction()->hasFnAttribute( 13235 Attribute::NoUnwind) && 13236 "Function should be nounwind in insertCopiesSplitCSR!"); 13237 Entry->addLiveIn(*I); 13238 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 13239 .addReg(*I); 13240 13241 // Insert the copy-back instructions right before the terminator. 13242 for (auto *Exit : Exits) 13243 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 13244 TII->get(TargetOpcode::COPY), *I) 13245 .addReg(NewVR); 13246 } 13247 } 13248