//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARMISelLowering.h"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
               LLVMContext &C, ParmContext PC)
        : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
static const uint16_t GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
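// Under both APCS and AAPCS, the first four word-sized arguments are passed
// in r0-r3; any remaining arguments are passed on the stack.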
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::QPRRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
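  // NEON comparisons set each result lane to all-ones when the predicate
  // holds and to all-zeros otherwise, which is why vector booleans are
  // modeled as zero-or-negative-one here.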
  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
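      // Each helper above returns a nonzero value when its predicate holds,
      // so the libcall result is tested with SETNE against zero; the
      // "ordered" query reuses __unord*2vfp and flips the sense with SETEQ,
      // since the operands are ordered exactly when the unordered test
      // returns 0.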
      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetDarwin()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);
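    // The RTABI fixes these helpers to the base (soft-float) AAPCS calling
    // convention no matter how the surrounding code is compiled, hence the
    // explicit setLibcallCallingConv for each entry here and below.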
    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64,   CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32,  "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
    setLibcallName(RTLIB::O_F32,   "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32,   CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64,   "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64,   CallingConv::ARM_AAPCS);
    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallCallingConv(RTLIB::SDIV_I8,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
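    // The RTABI has no plain 64-bit divide entry point: __aeabi_ldivmod and
    // __aeabi_uldivmod return the {quotient, remainder} pair in r0-r3, and
    // the remainder is simply ignored when only the quotient is wanted.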
    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY,  "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET,  "__aeabi_memset");
    setLibcallCallingConv(RTLIB::MEMCPY,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMSET,  CallingConv::ARM_AAPCS);
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().isiOS() &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }
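  // Start from the conservative position that no vector truncating store or
  // extending load is directly supported; the handful of combinations NEON
  // can handle in one instruction (e.g. extloads from v4i8) are re-marked
  // Legal further down.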
  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
    // supported for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way to detect when "copysign" appears in the DAG
    // with vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
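    // For example, a v8i16 multiply whose operands are sign-extended from
    // v8i8 can be selected as a single vmull.s8 rather than two lane
    // extensions followed by a full-width vmul.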
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have
    // a FP_TO_[SU]INT instruction with a destination narrower than the
    // source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // Custom expand long extensions to vectors.
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
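    // A v4i16 CTPOP, for instance, becomes a v8i8 vcnt followed by a
    // vpaddl.u8 that pairwise-accumulates the byte counts into halfwords;
    // the 32-bit element types add one more vpaddl step.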
    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
                  MVT::v4i16, MVT::v2i16,
                  MVT::v2i32};
    for (unsigned i = 0; i < 6; ++i) {
      setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);


  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }
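  // These map onto the hardware's base-register writeback forms, e.g.
  // pre-indexed "ldr r0, [r1, #4]!" and post-indexed "ldr r0, [r1], #4".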
  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // These just redirect to CTTZ and CTLZ on ARM.
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
      !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
    // These are expanded into libcalls if the CPU doesn't have a hardware
    // divider.
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }

  // FIXME: Also set divmod for SREM on EABI
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI()) {
    setLibcallName(RTLIB::SDIVREM_I8,  "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIVREM_I8,  "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");

    setLibcallCallingConv(RTLIB::SDIVREM_I8,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I8,  CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }
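  // __aeabi_idivmod returns the quotient in r0 and the remainder in r1, so
  // custom SDIVREM/UDIVREM lowering can satisfy a division and a remainder
  // of the same operands with a single call.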
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget->isTargetDarwin()) {
    // Non-Darwin platforms may return values in these registers via the
    // personality function.
    setExceptionPointerRegister(ARM::R0);
    setExceptionSelectorRegister(ARM::R1);
  }

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasV8Ops()) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      setInsertFencesForAtomic(true);
    }
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
    //setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
  } else {
    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
  }
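  // When exclusives are available, a 32-bit "atomicrmw add" becomes a retry
  // loop along the lines of:
  //   1:  ldrex  r1, [r0]
  //       add    r1, r1, r2
  //       strex  r3, r1, [r0]
  //       cmp    r3, #0
  //       bne    1b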
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }
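  // The custom int <-> fp lowering above yields a register transfer plus a
  // VFP convert; an i32 -> f32 conversion, for example, selects to
  // "vmov s0, r0" followed by "vcvt.f32.s32 s0, s0".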
  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->isLikeA9();

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  bool isThumb2, unsigned &LdrOpc,
                                  unsigned &StrOpc) {
  static const unsigned LoadBares[4][2] =  {{ARM::LDREXB, ARM::t2LDREXB},
                                            {ARM::LDREXH, ARM::t2LDREXH},
                                            {ARM::LDREX,  ARM::t2LDREX},
                                            {ARM::LDREXD, ARM::t2LDREXD}};
  static const unsigned LoadAcqs[4][2] =   {{ARM::LDAEXB, ARM::t2LDAEXB},
                                            {ARM::LDAEXH, ARM::t2LDAEXH},
                                            {ARM::LDAEX,  ARM::t2LDAEX},
                                            {ARM::LDAEXD, ARM::t2LDAEXD}};
  static const unsigned StoreBares[4][2] = {{ARM::STREXB, ARM::t2STREXB},
                                            {ARM::STREXH, ARM::t2STREXH},
                                            {ARM::STREX,  ARM::t2STREX},
                                            {ARM::STREXD, ARM::t2STREXD}};
  static const unsigned StoreRels[4][2] =  {{ARM::STLEXB, ARM::t2STLEXB},
                                            {ARM::STLEXH, ARM::t2STLEXH},
                                            {ARM::STLEX,  ARM::t2STLEX},
                                            {ARM::STLEXD, ARM::t2STLEXD}};

  const unsigned (*LoadOps)[2], (*StoreOps)[2];
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)][isThumb2];
  StrOpc = StoreOps[Log2_32(Size)][isThumb2];
}
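// For a sequentially consistent 32-bit operation on ARMv8, for example, this
// picks the LDAEX/STLEX pair; their built-in acquire/release semantics are
// what lets the v8 path above skip the explicit fences.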
// FIXME: It might make sense to define the representative register class as
// the nearest super-register that has a non-null superset. For example,
// DPR_VFP2 is a super-register of SPR, and DPR is a superset of DPR_VFP2.
// Consequently, SPR's representative would be DPR_VFP2. This should work well
// if register pressure tracking were modified such that a register use would
// increment the pressure of the register class's representative and all of
// its super classes' representatives transitively. We have not implemented
// this because of the difficulty prior to coalescing of modeling operand
// register classes due to the common occurrence of cross class copies and
// subregister insertions and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}
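// The cost here counts D registers: a 128-bit vector occupies one Q register
// (two Ds, cost 2), while v4i64 and v8i64 live in QQ and QQQQ register
// sequences of four and eight Ds respectively.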
"ARMISD::VCGEZ"; 1066 case ARMISD::VCLEZ: return "ARMISD::VCLEZ"; 1067 case ARMISD::VCGEU: return "ARMISD::VCGEU"; 1068 case ARMISD::VCGT: return "ARMISD::VCGT"; 1069 case ARMISD::VCGTZ: return "ARMISD::VCGTZ"; 1070 case ARMISD::VCLTZ: return "ARMISD::VCLTZ"; 1071 case ARMISD::VCGTU: return "ARMISD::VCGTU"; 1072 case ARMISD::VTST: return "ARMISD::VTST"; 1073 1074 case ARMISD::VSHL: return "ARMISD::VSHL"; 1075 case ARMISD::VSHRs: return "ARMISD::VSHRs"; 1076 case ARMISD::VSHRu: return "ARMISD::VSHRu"; 1077 case ARMISD::VSHLLs: return "ARMISD::VSHLLs"; 1078 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 1079 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 1080 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 1081 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 1082 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 1083 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 1084 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 1085 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 1086 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 1087 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 1088 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 1089 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 1090 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 1091 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 1092 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 1093 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 1094 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 1095 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 1096 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 1097 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; 1098 case ARMISD::VDUP: return "ARMISD::VDUP"; 1099 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 1100 case ARMISD::VEXT: return "ARMISD::VEXT"; 1101 case ARMISD::VREV64: return "ARMISD::VREV64"; 1102 case ARMISD::VREV32: return "ARMISD::VREV32"; 1103 case ARMISD::VREV16: return "ARMISD::VREV16"; 1104 case ARMISD::VZIP: return "ARMISD::VZIP"; 1105 case ARMISD::VUZP: return "ARMISD::VUZP"; 1106 case ARMISD::VTRN: return "ARMISD::VTRN"; 1107 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 1108 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 1109 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 1110 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 1111 case ARMISD::UMLAL: return "ARMISD::UMLAL"; 1112 case ARMISD::SMLAL: return "ARMISD::SMLAL"; 1113 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 1114 case ARMISD::FMAX: return "ARMISD::FMAX"; 1115 case ARMISD::FMIN: return "ARMISD::FMIN"; 1116 case ARMISD::VMAXNM: return "ARMISD::VMAX"; 1117 case ARMISD::VMINNM: return "ARMISD::VMIN"; 1118 case ARMISD::BFI: return "ARMISD::BFI"; 1119 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 1120 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 1121 case ARMISD::VBSL: return "ARMISD::VBSL"; 1122 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 1123 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 1124 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 1125 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 1126 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 1127 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 1128 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 1129 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 1130 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 1131 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 1132 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 1133 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 1134 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 
  case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
  }
}

EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}
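// A setcc on v4f32, for example, yields a v4i32 lane mask, while scalar
// compares produce the pointer-sized integer type.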
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
//                              Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
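// SETONE ("ordered and not equal"), for instance, has no single ARM
// condition after a VFP compare: it is tested as MI (less than) or-else GT
// (greater than), so callers emit a second predicated check whenever
// CondCode2 != ARMCC::AL.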
//===----------------------------------------------------------------------===//
//                 Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use the VFP variant of the calling
      // convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fallthrough
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  }
}
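// Illustrative dispatch: a CallingConv::C call on an AAPCS target built with
// -float-abi=hard and VFP2 resolves to CC_ARM_AAPCS_VFP (FP arguments in
// s/d registers); the same call on a soft-float AAPCS target resolves to
// CC_ARM_AAPCS, and on an APCS target to CC_ARM_APCS.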
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals,
                                   bool isThisReturn, SDValue ThisVal) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass the 'this' value directly from the argument to the return value,
    // to avoid reg unit interference.
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
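      // Illustrative: with soft-float return conventions an f64 comes back
      // in an i32 register pair (e.g. r0/r1); the two halves are reassembled
      // below with ARMISD::VMOVDRR. A v2f64 result repeats this for each
      // half, inserting the rebuilt f64s into the vector one at a time.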
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
SDValue
ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {

  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
  else {
    assert(NextVA.isMemLoc());
    if (StackPtr.getNode() == 0)
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
                                           dl, DAG, NextVA,
                                           Flags));
  }
}
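// Illustrative: PassF64ArgInRegs is the outbound mirror of the reassembly in
// LowerCallResult. ARMISD::VMOVRRD splits the f64 into two i32 results; the
// low word goes into VA's register and the high word either into NextVA's
// register or, if only one GPR was left, into NextVA's stack slot.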
/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool doesNotRet                       = CLI.DoesNotReturn;
  bool isVarArg                         = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool isStructRet    = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn   = false;
  bool isSibCall      = false;
  // Disable tail calls if they're not supported.
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    isTailCall = false;
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall) {
      ++NumTailCalls;
      isSibCall = true;
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallOperands(Outs,
                             CCAssignFnForNode(CallConv, /* Return*/ false,
                                               isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // For tail calls, memory operands are available in our caller's stack.
  if (isSibCall)
    NumBytes = 0;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!isSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.  In the
  // case of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProceed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false,
                                     DAG.InferPtrAlignment(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If the parameter size exceeds the register area, the "offset"
        // value helps us compute the stack slot for the remaining part of
        // the parameter.
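        // Illustrative: a 12-byte byval assigned RegBegin = r2, RegEnd = r4
        // loads two words into r2/r3, leaving offset = 2; the remaining
        // 12 - 4*2 = 4 bytes are copied onto the stack by the
        // COPY_STRUCT_BYVAL node built below.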
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      if (Flags.getByValSize() > 4*offset) {
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
                                  StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
        SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
                                           MVT::i32);
        SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops, array_lengthof(Ops)));
      }
    } else if (!isSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers, so in case
  // of tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  if (EnableARMLongCalls) {
    assert (getTargetMachine().getRelocationModel() == Reloc::Static
            && "long-calls with non-static relocation model!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
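    // Illustrative long-call lowering: the callee's address is placed in the
    // constant pool and loaded into a register, so the call becomes an
    // indirect branch (e.g. "ldr rN, .LCPIn; blx rN") that can reach any
    // address regardless of branch range.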
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
      const GlobalValue *GV = G->getGlobal();
      // Create a constant pool entry for the callee address.
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register.
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
    } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address.
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 0);
      // Get the address of the callee into a register.
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
    }
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                   getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
    // tBX takes a register source operand.
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      // On ELF targets for PIC code, direct calls should go through the PLT.
      unsigned OpFlags = 0;
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
        ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                      ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), dl,
                           DAG.getEntryNode(), CPAddr,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, 0);
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                           getPointerTy(), Callee, PICLabel);
    } else {
      unsigned OpFlags = 0;
      // On ELF targets for PIC code, direct calls should go through the PLT.
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_)
        OpFlags = ARMII::MO_PLT;
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags);
    }
  }

  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  bool HasMinSizeAttr = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRAS() &&
             // Emit a regular call when code size is the priority.
             !HasMinSizeAttr)
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
    const ARMBaseRegisterInfo *ARI =
      static_cast<const ARMBaseRegisterInfo*>(TRI);
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable.
      Mask = ARI->getThisReturnPreservedMask(CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through.
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall)
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
ARMTargetLowering::HandleByVal(
    CCState *State, unsigned &size, unsigned Align) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");

  // For in-prologue parameter handling, we also introduce a stack offset
  // for byval registers: see CallingConvLower.cpp, CCState::HandleByVal.
  // This behaviour deviates from the AAPCS rules (5.5 Parameter Passing) for
  // how the NSAA should be evaluated (NSAA means "next stacked argument
  // address").
  // So: NextStackOffset = NSAAOffset + SizeOfByValParamsStoredInRegs.
  // Then: NSAAOffset = NextStackOffset - SizeOfByValParamsStoredInRegs.
  unsigned NSAAOffset = State->getNextStackOffset();
  if (State->getCallOrPrologue() != Call) {
    for (unsigned i = 0, e = State->getInRegsParamsCount(); i != e; ++i) {
      unsigned RB, RE;
      State->getInRegsParamInfo(i, RB, RE);
      assert(NSAAOffset >= (RE-RB)*4 &&
             "Stack offset for byval regs no longer introduced?");
      NSAAOffset -= (RE-RB)*4;
    }
  }
  if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
    if (Subtarget->isAAPCS_ABI() && Align > 4) {
      unsigned AlignInRegs = Align / 4;
      unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
      for (unsigned i = 0; i < Waste; ++i)
        reg = State->AllocateReg(GPRArgRegs, 4);
    }
    if (reg != 0) {
      unsigned excess = 4 * (ARM::R4 - reg);

      // Special case when NSAA != SP and the parameter size is greater than
      // the size of all remaining GPR regs. In that case we can't split the
      // parameter: we must send it to the stack. We also must set the NCRN
      // to R4, wasting all remaining registers.
      if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
        while (State->AllocateReg(GPRArgRegs, 4))
          ;
        return;
      }

      // The first register for the byval parameter is the first register that
      // wasn't allocated before this method call, so it would be "reg".
      // If the parameter is small enough to be saved in the range [reg, r4),
      // then the end (first after last) register would be
      // reg + param-size-in-regs; otherwise the parameter is split between
      // registers and stack, and the end register is r4.
      unsigned ByValRegBegin = reg;
      unsigned ByValRegEnd = (size < excess) ? reg + size/4 : (unsigned)ARM::R4;
      State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
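      // Illustrative: reg = r1 and size = 20 gives excess = 12. Under AAPCS
      // with NSAAOffset != 0 the parameter goes entirely to the stack and
      // r1-r3 are wasted; otherwise ByValRegEnd = r4, r1-r3 carry the first
      // 12 bytes, and at a call site size shrinks to 20 - 12 = 8 stack bytes.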
      // Note: the first register was already allocated at the beginning of
      // this method; allocate the remaining registers we need here.
      for (unsigned i = reg+1; i != ByValRegEnd; ++i)
        State->AllocateReg(GPRArgRegs, 4);
      // At a call site, a byval parameter that is split between
      // registers and memory needs its size truncated here.  In a
      // function prologue, such byval parameters are reassembled in
      // memory, and are not truncated.
      if (State->getCallOrPrologue() == Call) {
        // Make the remaining size equal to 0 when the whole structure can be
        // stored in registers.
        if (size < excess)
          size = 0;
        else
          size -= excess;
      }
    }
  }
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same (relative) position of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                                     bool isCalleeStructRet,
                                                     bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Do not sibcall optimize vararg calls unless the call site is not passing
  // any arguments.
  if (isVarArg && !Outs.empty())
    return false;

  // Exception-handling functions need a special set of instructions to
  // indicate a return to the hardware. Tail-calling another function would
  // probably break this.
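  // Illustrative: a handler marked with the "interrupt" attribute returns
  // with an exception-return sequence such as "subs pc, lr, #4" (see
  // LowerInterruptReturn below); a tail-called callee would instead end in a
  // plain "bx lr" and skip that sequence.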
  if (CallerF->hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
  // LR.  This means if we need to reload LR, it takes an extra instruction,
  // which outweighs the value of the tail call; but here we don't know yet
  // whether LR is going to be used.  Probably the right approach is to
  // generate the tail call here and turn it back into CALL/RET in
  // emitEpilogue if LR is used.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters.  We don't currently do this
  // case.
  if (Subtarget->isThumb1Only())
    return false;

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

    SmallVector<CCValAssign, 16> RVLocs2;
    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                       getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // If the caller's vararg or byval argument has been split between registers
  // and stack, do not perform the tail call, since part of the argument is in
  // the caller's local frame.
  const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
                                      getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
                      getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
    CCInfo.AnalyzeCallOperands(Outs,
                               CCAssignFnForNode(CalleeCC, false, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      MachineFunction &MF = DAG.getMachineFunction();

      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations.  The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }
  }

  return true;
}

bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
                                                    isVarArg));
}

static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    SDLoc DL, SelectionDAG &DAG) {
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *F = MF.getFunction();

  StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();

  // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
  // version of the "preferred return address". These offsets affect the return
  // instruction if this is a return from PL1 without hypervisor extensions.
  //    IRQ/FIQ: +4     "subs pc, lr, #4"
  //    SWI:     0      "subs pc, lr, #0"
  //    ABORT:   +4     "subs pc, lr, #4"
  //    UNDEF:   +4/+2  "subs pc, lr, #0"
  // UNDEF varies depending on whether the exception came from ARM or Thumb
  // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.

  int64_t LROffset;
  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
      IntKind == "ABORT")
    LROffset = 4;
  else if (IntKind == "SWI" || IntKind == "UNDEF")
    LROffset = 0;
  else
If present, value " 2132 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); 2133 2134 RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false)); 2135 2136 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, 2137 RetOps.data(), RetOps.size()); 2138 } 2139 2140 SDValue 2141 ARMTargetLowering::LowerReturn(SDValue Chain, 2142 CallingConv::ID CallConv, bool isVarArg, 2143 const SmallVectorImpl<ISD::OutputArg> &Outs, 2144 const SmallVectorImpl<SDValue> &OutVals, 2145 SDLoc dl, SelectionDAG &DAG) const { 2146 2147 // CCValAssign - represent the assignment of the return value to a location. 2148 SmallVector<CCValAssign, 16> RVLocs; 2149 2150 // CCState - Info about the registers and stack slots. 2151 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2152 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 2153 2154 // Analyze outgoing return values. 2155 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 2156 isVarArg)); 2157 2158 SDValue Flag; 2159 SmallVector<SDValue, 4> RetOps; 2160 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2161 2162 // Copy the result values into the output registers. 2163 for (unsigned i = 0, realRVLocIdx = 0; 2164 i != RVLocs.size(); 2165 ++i, ++realRVLocIdx) { 2166 CCValAssign &VA = RVLocs[i]; 2167 assert(VA.isRegLoc() && "Can only return in registers!"); 2168 2169 SDValue Arg = OutVals[realRVLocIdx]; 2170 2171 switch (VA.getLocInfo()) { 2172 default: llvm_unreachable("Unknown loc info!"); 2173 case CCValAssign::Full: break; 2174 case CCValAssign::BCvt: 2175 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 2176 break; 2177 } 2178 2179 if (VA.needsCustom()) { 2180 if (VA.getLocVT() == MVT::v2f64) { 2181 // Extract the first half and return it in two registers. 2182 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2183 DAG.getConstant(0, MVT::i32)); 2184 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 2185 DAG.getVTList(MVT::i32, MVT::i32), Half); 2186 2187 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 2188 Flag = Chain.getValue(1); 2189 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2190 VA = RVLocs[++i]; // skip ahead to next loc 2191 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2192 HalfGPRs.getValue(1), Flag); 2193 Flag = Chain.getValue(1); 2194 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2195 VA = RVLocs[++i]; // skip ahead to next loc 2196 2197 // Extract the 2nd half and fall through to handle it as an f64 value. 2198 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2199 DAG.getConstant(1, MVT::i32)); 2200 } 2201 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 2202 // available. 2203 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 2204 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 2205 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 2206 Flag = Chain.getValue(1); 2207 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2208 VA = RVLocs[++i]; // skip ahead to next loc 2209 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 2210 Flag); 2211 } else 2212 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 2213 2214 // Guarantee that all emitted copies are 2215 // stuck together, avoiding something bad. 2216 Flag = Chain.getValue(1); 2217 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2218 } 2219 2220 // Update chain and glue. 
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other,
                     RetOps.data(), RetOps.size());
}

bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe
    // to perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else
        // First CopyToReg
        TCChain = UseChain;
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!EnableARMTailCalls && !Subtarget->supportsTailCall())
    return false;

  if (!CI->isTailCall())
    return false;

  return !Subtarget->isThumb1Only();
}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOVi.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
                                      ARMCP::CPBlockAddress, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
                               MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);
  if (RelocM == Reloc::Static)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(GA);
  EVT PtrVT = getPointerTy();
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV =
    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);
  // FIXME: is there useful debug info available here?
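  // Illustrative: the code above computes the pc-relative address of the
  // module's TLSGD entry for this variable; the runtime call built below,
  // __tls_get_addr(&tlsgd_entry), then returns the variable's address for
  // the current thread.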
  TargetLowering::CallLoweringInfo CLI(Chain,
                (Type *) Type::getInt32Ty(*DAG.getContext()),
                false, false, false, false,
                0, CallingConv::C, /*isTailCall=*/false,
                /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy();
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
  } else {
    // local exec model
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
                         MachinePointerInfo::getConstantPool(),
                         false, false, false, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
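  // Illustrative: in the initial-exec model the offset is loaded indirectly
  // through the GOT (gottpoff); in local-exec it is a link-time constant
  // (tpoff). Either way the result is added to the thread pointer, which
  // ARMISD::THREAD_POINTER reads from TPIDRURO
  // ("mrc p15, #0, Rd, c13, c0, #3").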
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());

  switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModels(GA, DAG, model);
  }
  llvm_unreachable("bogus TLS model");
}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV,
                                      UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                 CPAddr,
                                 MachinePointerInfo::getConstantPool(),
                                 false, false, false, 0);
    SDValue Chain = Result.getValue(1);
    SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
    if (!UseGOTOFF)
      Result = DAG.getLoad(PtrVT, dl, Chain, Result,
                           MachinePointerInfo::getGOT(),
                           false, false, false, 0);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via a
  // movt/movw pair. This is always cheaper.
  if (Subtarget->useMovt()) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                       MachinePointerInfo::getConstantPool(),
                       false, false, false, 0);
  }
}
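// Illustrative: with useMovt the static ELF path above becomes
// "movw r0, :lower16:sym; movt r0, :upper16:sym", while the constant-pool
// fallback emits "ldr r0, .LCPIn" plus a literal-pool entry holding the
// symbol's address.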
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();

  // FIXME: Enable this for static codegen when tool issues are fixed.  Also
  // update ARMFastISel::ARMMaterializeGV.
  if (Subtarget->useMovt() && RelocM != Reloc::Static) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    if (RelocM == Reloc::Static)
      return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                         DAG.getTargetGlobalAddress(GV, dl, PtrVT));

    unsigned Wrapper = (RelocM == Reloc::PIC_)
      ? ARMISD::WrapperPIC : ARMISD::WrapperDYN;
    SDValue Result = DAG.getNode(Wrapper, dl, PtrVT,
                                 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
    if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
      Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                           MachinePointerInfo::getGOT(),
                           false, false, false, 0);
    return Result;
  }

  unsigned ARMPCLabelIndex = 0;
  SDValue CPAddr;
  if (RelocM == Reloc::Static) {
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
  } else {
    ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
    ARMPCLabelIndex = AFI->createPICLabelUId();
    unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue,
                                      PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);

  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                               MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);
  SDValue Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
  }

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
    Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(),
                         false, false, false, 0);

  return Result;
}

SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(Subtarget->isTargetELF() &&
         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_",
                                  ARMPCLabelIndex, PCAdj);
  SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
  SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                               MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Val = DAG.getConstant(0, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, MVT::i32));
}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::arm_thread_pointer: {
    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy();
    Reloc::Model RelocM = getTargetMachine().getRelocationModel();
    SDValue CPAddr;
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                  MachinePointerInfo::getConstantPool(),
                  false, false, false, 0);

    if (RelocM == Reloc::PIC_) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  }
}

static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  // FIXME: handle "fence singlethread" more efficiently.
  SDLoc dl(Op);
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, MVT::i32));
  }

  ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
  AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
  unsigned Domain = ARM_MB::ISH;
  if (Subtarget->isMClass()) {
    // Only a full system barrier exists in the M-class architectures.
    Domain = ARM_MB::SY;
  } else if (Subtarget->isSwift() && Ord == Release) {
    // Swift happens to implement ISHST barriers in a way that's compatible
    // with Release semantics but weaker than ISH, so we'd be fools not to use
    // it. Beware: other processors probably don't!
    Domain = ARM_MB::ISHST;
  }

  return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
                     DAG.getConstant(Domain, MVT::i32));
}
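// Illustrative: a seq_cst fence on a Cortex-A class core thus becomes
// "dmb ish", an M-class core gets "dmb sy", and a release fence on Swift
// gets "dmb ishst".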
2718 return Op.getOperand(0);
2719
2720 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2721 if (Subtarget->isThumb()) {
2722 // Invert the bits.
2723 isRead = ~isRead & 1;
2724 isData = ~isData & 1;
2725 }
2726
2727 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
2728 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32),
2729 DAG.getConstant(isData, MVT::i32));
2730 }
2731
2732 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
2733 MachineFunction &MF = DAG.getMachineFunction();
2734 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
2735
2736 // vastart just stores the address of the VarArgsFrameIndex slot into the
2737 // memory location argument.
2738 SDLoc dl(Op);
2739 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2740 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
2741 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2742 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
2743 MachinePointerInfo(SV), false, false, 0);
2744 }
2745
2746 SDValue
2747 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
2748 SDValue &Root, SelectionDAG &DAG,
2749 SDLoc dl) const {
2750 MachineFunction &MF = DAG.getMachineFunction();
2751 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2752
2753 const TargetRegisterClass *RC;
2754 if (AFI->isThumb1OnlyFunction())
2755 RC = &ARM::tGPRRegClass;
2756 else
2757 RC = &ARM::GPRRegClass;
2758
2759 // Transform the arguments stored in physical registers into virtual ones.
2760 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2761 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2762
2763 SDValue ArgValue2;
2764 if (NextVA.isMemLoc()) {
2765 MachineFrameInfo *MFI = MF.getFrameInfo();
2766 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
2767
2768 // Create a load node to retrieve arguments from the stack.
2769 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2770 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
2771 MachinePointerInfo::getFixedStack(FI),
2772 false, false, false, 0);
2773 } else {
2774 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2775 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
2776 }
2777
2778 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
2779 }
2780
2781 void
2782 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
2783 unsigned InRegsParamRecordIdx,
2784 unsigned ArgSize,
2785 unsigned &ArgRegsSize,
2786 unsigned &ArgRegsSaveSize)
2787 const {
2788 unsigned NumGPRs;
2789 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
2790 unsigned RBegin, REnd;
2791 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
2792 NumGPRs = REnd - RBegin;
2793 } else {
2794 unsigned int firstUnalloced;
2795 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
2796 sizeof(GPRArgRegs) /
2797 sizeof(GPRArgRegs[0]));
2798 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
2799 }
2800
2801 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
2802 ArgRegsSize = NumGPRs * 4;
2803
2804 // If the parameter is split between the stack and GPRs...
2805 if (NumGPRs && Align == 8 &&
2806 (ArgRegsSize < ArgSize ||
2807 InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
2808 // Add padding for the part of the parameter recovered from GPRs, so that
2809 // its last byte ends at an address of the form K*8 - 1.
2810 // We need this because the remaining (stack) part of the parameter is
2811 // stack-aligned, and we need to "attach" the "GPRs head" to it without
2812 // gaps:
2813 // Stack:
2814 // |---- 8 bytes block ----| |---- 8 bytes block ----| |---- 8 bytes...
2815 // [ [padding] [GPRs head] ] [ Tail passed via stack ....
2816 //
2817 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2818 unsigned Padding =
2819 ((ArgRegsSize + AFI->getArgRegsSaveSize() + Align - 1) & ~(Align-1)) -
2820 (ArgRegsSize + AFI->getArgRegsSaveSize());
2821 ArgRegsSaveSize = ArgRegsSize + Padding;
2822 } else
2823 // We don't need to extend the regs save size for byval parameters if they
2824 // are passed via GPRs only.
2825 ArgRegsSaveSize = ArgRegsSize;
2826 }
2827
2828 // The remaining GPRs hold either the beginning of variable-argument
2829 // data, or the beginning of an aggregate passed by value (usually
2830 // byval). Either way, we allocate stack slots adjacent to the data
2831 // provided by our caller, and store the unallocated registers there.
2832 // If this is a variadic function, the va_list pointer will begin with
2833 // these values; otherwise, this reassembles a (byval) structure that
2834 // was split between registers and memory.
2835 // Return: the frame index that the registers were stored into.
2836 int
2837 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
2838 SDLoc dl, SDValue &Chain,
2839 const Value *OrigArg,
2840 unsigned InRegsParamRecordIdx,
2841 unsigned OffsetFromOrigArg,
2842 unsigned ArgOffset,
2843 unsigned ArgSize,
2844 bool ForceMutable) const {
2845
2846 // Currently, two use-cases are possible:
2847 // Case #1. A non-varargs function where we meet the first byval parameter.
2848 // The first unallocated register is set up as the first byval
2849 // register, and all remaining registers are consumed
2850 // (these two actions are performed by the HandleByVal method).
2851 // Then, here, we initialize the stack frame with
2852 // "store-reg" instructions.
2853 // Case #2. A varargs function that contains no byval parameters.
2854 // The same: consume all remaining unallocated registers and
2855 // initialize the stack frame.
2856
2857 MachineFunction &MF = DAG.getMachineFunction();
2858 MachineFrameInfo *MFI = MF.getFrameInfo();
2859 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2860 unsigned firstRegToSaveIndex, lastRegToSaveIndex;
2861 unsigned RBegin, REnd;
2862 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
2863 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
2864 firstRegToSaveIndex = RBegin - ARM::R0;
2865 lastRegToSaveIndex = REnd - ARM::R0;
2866 } else {
2867 firstRegToSaveIndex = CCInfo.getFirstUnallocated
2868 (GPRArgRegs, array_lengthof(GPRArgRegs));
2869 lastRegToSaveIndex = 4;
2870 }
2871
2872 unsigned ArgRegsSize, ArgRegsSaveSize;
2873 computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
2874 ArgRegsSize, ArgRegsSaveSize);
2875
2876 // Store any byval regs to their spots on the stack so that they may be
2877 // loaded by dereferencing the result of the formal parameter pointer or
2878 // va_next. Note: once the stack area for byval/varargs registers has been
2879 // initialized, it cannot be initialized again.
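// Illustrative trace of the computeRegArea padding above (hypothetical
// numbers, assuming an 8-byte stack alignment and no previously saved
// argument registers): a 10-byte byval with only r3 still free gives
// ArgRegsSize = 4; since 4 < 10 the register head is padded:
//   Padding = ((4 + 0 + 8 - 1) & ~7) - (4 + 0) = 8 - 4 = 4
// so ArgRegsSaveSize = 8 and the 4-byte "GPRs head" ends at offset 8 - 1,
// flush against the stack-resident tail of the parameter.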
2880 if (ArgRegsSaveSize) {
2881
2882 unsigned Padding = ArgRegsSaveSize - ArgRegsSize;
2883
2884 if (Padding) {
2885 assert(AFI->getStoredByValParamsPadding() == 0 &&
2886 "The only parameter may be padded.");
2887 AFI->setStoredByValParamsPadding(Padding);
2888 }
2889
2890 int FrameIndex = MFI->CreateFixedObject(
2891 ArgRegsSaveSize,
2892 Padding + ArgOffset,
2893 false);
2894 SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
2895
2896 SmallVector<SDValue, 4> MemOps;
2897 for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
2898 ++firstRegToSaveIndex, ++i) {
2899 const TargetRegisterClass *RC;
2900 if (AFI->isThumb1OnlyFunction())
2901 RC = &ARM::tGPRRegClass;
2902 else
2903 RC = &ARM::GPRRegClass;
2904
2905 unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
2906 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
2907 SDValue Store =
2908 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2909 MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
2910 false, false, 0);
2911 MemOps.push_back(Store);
2912 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
2913 DAG.getConstant(4, getPointerTy()));
2914 }
2915
2916 AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());
2917
2918 if (!MemOps.empty())
2919 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2920 &MemOps[0], MemOps.size());
2921 return FrameIndex;
2922 } else
2923 // This will point to the next argument passed via the stack.
2924 return MFI->CreateFixedObject(
2925 4, AFI->getStoredByValParamsPadding() + ArgOffset, !ForceMutable);
2926 }
2927
2928 // Set up the stack frame that the va_list pointer will start from.
2929 void
2930 ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
2931 SDLoc dl, SDValue &Chain,
2932 unsigned ArgOffset,
2933 bool ForceMutable) const {
2934 MachineFunction &MF = DAG.getMachineFunction();
2935 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2936
2937 // Try to store any remaining integer argument regs to their spots on
2938 // the stack so that they may be loaded by dereferencing the result of
2939 // va_next.
2940 // If there are no regs to be stored, just point at the address after the
2941 // last argument passed via the stack.
2942 int FrameIndex =
2943 StoreByValRegs(CCInfo, DAG, dl, Chain, 0, CCInfo.getInRegsParamsCount(),
2944 0, ArgOffset, 0, ForceMutable);
2945
2946 AFI->setVarArgsFrameIndex(FrameIndex);
2947 }
2948
2949 SDValue
2950 ARMTargetLowering::LowerFormalArguments(SDValue Chain,
2951 CallingConv::ID CallConv, bool isVarArg,
2952 const SmallVectorImpl<ISD::InputArg>
2953 &Ins,
2954 SDLoc dl, SelectionDAG &DAG,
2955 SmallVectorImpl<SDValue> &InVals)
2956 const {
2957 MachineFunction &MF = DAG.getMachineFunction();
2958 MachineFrameInfo *MFI = MF.getFrameInfo();
2959
2960 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2961
2962 // Assign locations to all of the incoming arguments.
2963 SmallVector<CCValAssign, 16> ArgLocs;
2964 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2965 getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
2966 CCInfo.AnalyzeFormalArguments(Ins,
2967 CCAssignFnForNode(CallConv, /* Return*/ false,
2968 isVarArg));
2969
2970 SmallVector<SDValue, 16> ArgValues;
2971 int lastInsIndex = -1;
2972 SDValue ArgValue;
2973 Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
2974 unsigned CurArgIdx = 0;
2975
2976 // Initially ArgRegsSaveSize is zero; it is increased each time we meet
2977 // a byval parameter.
2978 // It is also increased for varargs functions.
2979 AFI->setArgRegsSaveSize(0);
2980
2981 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2982 CCValAssign &VA = ArgLocs[i];
2983 std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
2984 CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
2985 // Arguments stored in registers.
2986 if (VA.isRegLoc()) {
2987 EVT RegVT = VA.getLocVT();
2988
2989 if (VA.needsCustom()) {
2990 // f64 and vector types are split up into multiple registers or
2991 // combinations of registers and stack slots.
2992 if (VA.getLocVT() == MVT::v2f64) {
2993 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
2994 Chain, DAG, dl);
2995 VA = ArgLocs[++i]; // skip ahead to next loc
2996 SDValue ArgValue2;
2997 if (VA.isMemLoc()) {
2998 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
2999 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
3000 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
3001 MachinePointerInfo::getFixedStack(FI),
3002 false, false, false, 0);
3003 } else {
3004 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3005 Chain, DAG, dl);
3006 }
3007 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
3008 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3009 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
3010 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3011 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
3012 } else
3013 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3014
3015 } else {
3016 const TargetRegisterClass *RC;
3017
3018 if (RegVT == MVT::f32)
3019 RC = &ARM::SPRRegClass;
3020 else if (RegVT == MVT::f64)
3021 RC = &ARM::DPRRegClass;
3022 else if (RegVT == MVT::v2f64)
3023 RC = &ARM::QPRRegClass;
3024 else if (RegVT == MVT::i32)
3025 RC = AFI->isThumb1OnlyFunction() ?
3026 (const TargetRegisterClass*)&ARM::tGPRRegClass :
3027 (const TargetRegisterClass*)&ARM::GPRRegClass;
3028 else
3029 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
3030
3031 // Transform the arguments in physical registers into virtual ones.
3032 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3033 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3034 }
3035
3036 // If this is an 8 or 16-bit value, it is really passed promoted
3037 // to 32 bits. Insert an assert[sz]ext to capture this, then
3038 // truncate to the right size.
3039 switch (VA.getLocInfo()) {
3040 default: llvm_unreachable("Unknown loc info!");
3041 case CCValAssign::Full: break;
3042 case CCValAssign::BCvt:
3043 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
3044 break;
3045 case CCValAssign::SExt:
3046 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3047 DAG.getValueType(VA.getValVT()));
3048 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3049 break;
3050 case CCValAssign::ZExt:
3051 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3052 DAG.getValueType(VA.getValVT()));
3053 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3054 break;
3055 }
3056
3057 InVals.push_back(ArgValue);
3058
3059 } else { // VA.isRegLoc()
3060
3061 // sanity check
3062 assert(VA.isMemLoc());
3063 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
3064
3065 int index = ArgLocs[i].getValNo();
3066
3067 // Some Ins[] entries become multiple ArgLoc[] entries.
3068 // Process them only once.
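// (For example, an f64 formal split across a register pair, or a v2f64
// split between registers and the stack, produces several CCValAssign
// records for a single incoming value; lastInsIndex below ensures the
// stack-resident piece is materialized only once.)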
3069 if (index != lastInsIndex)
3070 {
3071 ISD::ArgFlagsTy Flags = Ins[index].Flags;
3072 // FIXME: For now, all byval parameter objects are marked mutable.
3073 // This can be changed with more analysis.
3074 // In the case of tail call optimization, mark all arguments mutable,
3075 // since they could be overwritten by the lowering of arguments for a
3076 // tail call.
3077 if (Flags.isByVal()) {
3078 unsigned CurByValIndex = CCInfo.getInRegsParamsProceed();
3079 int FrameIndex = StoreByValRegs(
3080 CCInfo, DAG, dl, Chain, CurOrigArg,
3081 CurByValIndex,
3082 Ins[VA.getValNo()].PartOffset,
3083 VA.getLocMemOffset(),
3084 Flags.getByValSize(),
3085 true /*force mutable frames*/);
3086 InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
3087 CCInfo.nextInRegsParam();
3088 } else {
3089 unsigned FIOffset = VA.getLocMemOffset() +
3090 AFI->getStoredByValParamsPadding();
3091 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
3092 FIOffset, true);
3093
3094 // Create load nodes to retrieve arguments from the stack.
3095 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
3096 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
3097 MachinePointerInfo::getFixedStack(FI),
3098 false, false, false, 0));
3099 }
3100 lastInsIndex = index;
3101 }
3102 }
3103 }
3104
3105 // varargs
3106 if (isVarArg)
3107 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3108 CCInfo.getNextStackOffset());
3109
3110 return Chain;
3111 }
3112
3113 /// isFloatingPointZero - Return true if this is +0.0.
3114 static bool isFloatingPointZero(SDValue Op) {
3115 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
3116 return CFP->getValueAPF().isPosZero();
3117 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
3118 // Maybe this has already been legalized into the constant pool?
3119 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
3120 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3121 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
3122 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3123 return CFP->getValueAPF().isPosZero();
3124 }
3125 }
3126 return false;
3127 }
3128
3129 /// Returns an appropriate ARM CMP (cmp) and the corresponding condition code
3130 /// for the given operands.
3131 SDValue
3132 ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3133 SDValue &ARMcc, SelectionDAG &DAG,
3134 SDLoc dl) const {
3135 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3136 unsigned C = RHSC->getZExtValue();
3137 if (!isLegalICmpImmediate(C)) {
3138 // Constant does not fit, try adjusting it by one?
3139 switch (CC) {
3140 default: break;
3141 case ISD::SETLT:
3142 case ISD::SETGE:
3143 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
3144 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3145 RHS = DAG.getConstant(C-1, MVT::i32);
3146 }
3147 break;
3148 case ISD::SETULT:
3149 case ISD::SETUGE:
3150 if (C != 0 && isLegalICmpImmediate(C-1)) {
3151 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3152 RHS = DAG.getConstant(C-1, MVT::i32);
3153 }
3154 break;
3155 case ISD::SETLE:
3156 case ISD::SETGT:
3157 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
3158 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3159 RHS = DAG.getConstant(C+1, MVT::i32);
3160 }
3161 break;
3162 case ISD::SETULE:
3163 case ISD::SETUGT:
3164 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
3165 CC = (CC == ISD::SETULE) ?
ISD::SETULT : ISD::SETUGE;
3166 RHS = DAG.getConstant(C+1, MVT::i32);
3167 }
3168 break;
3169 }
3170 }
3171 }
3172
3173 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
3174 ARMISD::NodeType CompareType;
3175 switch (CondCode) {
3176 default:
3177 CompareType = ARMISD::CMP;
3178 break;
3179 case ARMCC::EQ:
3180 case ARMCC::NE:
3181 // Uses only Z Flag
3182 CompareType = ARMISD::CMPZ;
3183 break;
3184 }
3185 ARMcc = DAG.getConstant(CondCode, MVT::i32);
3186 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
3187 }
3188
3189 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
3190 SDValue
3191 ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
3192 SDLoc dl) const {
3193 SDValue Cmp;
3194 if (!isFloatingPointZero(RHS))
3195 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
3196 else
3197 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
3198 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
3199 }
3200
3201 /// duplicateCmp - Glue values can have only one use, so this function
3202 /// duplicates a comparison node.
3203 SDValue
3204 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
3205 unsigned Opc = Cmp.getOpcode();
3206 SDLoc DL(Cmp);
3207 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
3208 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3209
3210 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
3211 Cmp = Cmp.getOperand(0);
3212 Opc = Cmp.getOpcode();
3213 if (Opc == ARMISD::CMPFP)
3214 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3215 else {
3216 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
3217 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
3218 }
3219 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
3220 }
3221
3222 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3223 SDValue Cond = Op.getOperand(0);
3224 SDValue SelectTrue = Op.getOperand(1);
3225 SDValue SelectFalse = Op.getOperand(2);
3226 SDLoc dl(Op);
3227
3228 // Convert:
3229 //
3230 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
3231 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
3232 //
3233 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
3234 const ConstantSDNode *CMOVTrue =
3235 dyn_cast<ConstantSDNode>(Cond.getOperand(0));
3236 const ConstantSDNode *CMOVFalse =
3237 dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3238
3239 if (CMOVTrue && CMOVFalse) {
3240 unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
3241 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
3242
3243 SDValue True;
3244 SDValue False;
3245 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
3246 True = SelectTrue;
3247 False = SelectFalse;
3248 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
3249 True = SelectFalse;
3250 False = SelectTrue;
3251 }
3252
3253 if (True.getNode() && False.getNode()) {
3254 EVT VT = Op.getValueType();
3255 SDValue ARMcc = Cond.getOperand(2);
3256 SDValue CCR = Cond.getOperand(3);
3257 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
3258 assert(True.getValueType() == VT);
3259 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
3260 }
3261 }
3262 }
3263
3264 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
3265 // undefined bits before doing a full-word comparison with zero.
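// In pseudo-IR (illustrative), select(c, t, f) is emitted as:
//   m = and c, 1
//   select_cc m, 0, t, f, setne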
3266 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 3267 DAG.getConstant(1, Cond.getValueType())); 3268 3269 return DAG.getSelectCC(dl, Cond, 3270 DAG.getConstant(0, Cond.getValueType()), 3271 SelectTrue, SelectFalse, ISD::SETNE); 3272 } 3273 3274 static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) { 3275 if (CC == ISD::SETNE) 3276 return ISD::SETEQ; 3277 return ISD::getSetCCSwappedOperands(CC); 3278 } 3279 3280 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 3281 bool &swpCmpOps, bool &swpVselOps) { 3282 // Start by selecting the GE condition code for opcodes that return true for 3283 // 'equality' 3284 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE || 3285 CC == ISD::SETULE) 3286 CondCode = ARMCC::GE; 3287 3288 // and GT for opcodes that return false for 'equality'. 3289 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT || 3290 CC == ISD::SETULT) 3291 CondCode = ARMCC::GT; 3292 3293 // Since we are constrained to GE/GT, if the opcode contains 'less', we need 3294 // to swap the compare operands. 3295 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT || 3296 CC == ISD::SETULT) 3297 swpCmpOps = true; 3298 3299 // Both GT and GE are ordered comparisons, and return false for 'unordered'. 3300 // If we have an unordered opcode, we need to swap the operands to the VSEL 3301 // instruction (effectively negating the condition). 3302 // 3303 // This also has the effect of swapping which one of 'less' or 'greater' 3304 // returns true, so we also swap the compare operands. It also switches 3305 // whether we return true for 'equality', so we compensate by picking the 3306 // opposite condition code to our original choice. 3307 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE || 3308 CC == ISD::SETUGT) { 3309 swpCmpOps = !swpCmpOps; 3310 swpVselOps = !swpVselOps; 3311 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT; 3312 } 3313 3314 // 'ordered' is 'anything but unordered', so use the VS condition code and 3315 // swap the VSEL operands. 3316 if (CC == ISD::SETO) { 3317 CondCode = ARMCC::VS; 3318 swpVselOps = true; 3319 } 3320 3321 // 'unordered or not equal' is 'anything but equal', so use the EQ condition 3322 // code and swap the VSEL operands. 3323 if (CC == ISD::SETUNE) { 3324 CondCode = ARMCC::EQ; 3325 swpVselOps = true; 3326 } 3327 } 3328 3329 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 3330 EVT VT = Op.getValueType(); 3331 SDValue LHS = Op.getOperand(0); 3332 SDValue RHS = Op.getOperand(1); 3333 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 3334 SDValue TrueVal = Op.getOperand(2); 3335 SDValue FalseVal = Op.getOperand(3); 3336 SDLoc dl(Op); 3337 3338 if (LHS.getValueType() == MVT::i32) { 3339 // Try to generate VSEL on ARMv8. 3340 // The VSEL instruction can't use all the usual ARM condition 3341 // codes: it only has two bits to select the condition code, so it's 3342 // constrained to use only GE, GT, VS and EQ. 
3343 // 3344 // To implement all the various ISD::SETXXX opcodes, we sometimes need to 3345 // swap the operands of the previous compare instruction (effectively 3346 // inverting the compare condition, swapping 'less' and 'greater') and 3347 // sometimes need to swap the operands to the VSEL (which inverts the 3348 // condition in the sense of firing whenever the previous condition didn't) 3349 if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 3350 TrueVal.getValueType() == MVT::f64)) { 3351 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3352 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || 3353 CondCode == ARMCC::VC || CondCode == ARMCC::NE) { 3354 CC = getInverseCCForVSEL(CC); 3355 std::swap(TrueVal, FalseVal); 3356 } 3357 } 3358 3359 SDValue ARMcc; 3360 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3361 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3362 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR, 3363 Cmp); 3364 } 3365 3366 ARMCC::CondCodes CondCode, CondCode2; 3367 FPCCToARMCC(CC, CondCode, CondCode2); 3368 3369 // Try to generate VSEL on ARMv8. 3370 if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 || 3371 TrueVal.getValueType() == MVT::f64)) { 3372 // We can select VMAXNM/VMINNM from a compare followed by a select with the 3373 // same operands, as follows: 3374 // c = fcmp [ogt, olt, ugt, ult] a, b 3375 // select c, a, b 3376 // We only do this in unsafe-fp-math, because signed zeros and NaNs are 3377 // handled differently than the original code sequence. 3378 if (getTargetMachine().Options.UnsafeFPMath && LHS == TrueVal && 3379 RHS == FalseVal) { 3380 if (CC == ISD::SETOGT || CC == ISD::SETUGT) 3381 return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal); 3382 if (CC == ISD::SETOLT || CC == ISD::SETULT) 3383 return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal); 3384 } 3385 3386 bool swpCmpOps = false; 3387 bool swpVselOps = false; 3388 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps); 3389 3390 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE || 3391 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) { 3392 if (swpCmpOps) 3393 std::swap(LHS, RHS); 3394 if (swpVselOps) 3395 std::swap(TrueVal, FalseVal); 3396 } 3397 } 3398 3399 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 3400 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3401 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3402 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 3403 ARMcc, CCR, Cmp); 3404 if (CondCode2 != ARMCC::AL) { 3405 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 3406 // FIXME: Needs another CMP because flag can have but one use. 3407 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 3408 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 3409 Result, TrueVal, ARMcc2, CCR, Cmp2); 3410 } 3411 return Result; 3412 } 3413 3414 /// canChangeToInt - Given the fp compare operand, return true if it is suitable 3415 /// to morph to an integer compare sequence. 3416 static bool canChangeToInt(SDValue Op, bool &SeenZero, 3417 const ARMSubtarget *Subtarget) { 3418 SDNode *N = Op.getNode(); 3419 if (!N->hasOneUse()) 3420 // Otherwise it requires moving the value from fp to integer registers. 3421 return false; 3422 if (!N->getNumValues()) 3423 return false; 3424 EVT VT = Op.getValueType(); 3425 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 3426 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 3427 // vmrs are very slow, e.g. cortex-a8. 
3428 return false; 3429 3430 if (isFloatingPointZero(Op)) { 3431 SeenZero = true; 3432 return true; 3433 } 3434 return ISD::isNormalLoad(N); 3435 } 3436 3437 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 3438 if (isFloatingPointZero(Op)) 3439 return DAG.getConstant(0, MVT::i32); 3440 3441 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 3442 return DAG.getLoad(MVT::i32, SDLoc(Op), 3443 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 3444 Ld->isVolatile(), Ld->isNonTemporal(), 3445 Ld->isInvariant(), Ld->getAlignment()); 3446 3447 llvm_unreachable("Unknown VFP cmp argument!"); 3448 } 3449 3450 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 3451 SDValue &RetVal1, SDValue &RetVal2) { 3452 if (isFloatingPointZero(Op)) { 3453 RetVal1 = DAG.getConstant(0, MVT::i32); 3454 RetVal2 = DAG.getConstant(0, MVT::i32); 3455 return; 3456 } 3457 3458 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 3459 SDValue Ptr = Ld->getBasePtr(); 3460 RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op), 3461 Ld->getChain(), Ptr, 3462 Ld->getPointerInfo(), 3463 Ld->isVolatile(), Ld->isNonTemporal(), 3464 Ld->isInvariant(), Ld->getAlignment()); 3465 3466 EVT PtrType = Ptr.getValueType(); 3467 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 3468 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op), 3469 PtrType, Ptr, DAG.getConstant(4, PtrType)); 3470 RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op), 3471 Ld->getChain(), NewPtr, 3472 Ld->getPointerInfo().getWithOffset(4), 3473 Ld->isVolatile(), Ld->isNonTemporal(), 3474 Ld->isInvariant(), NewAlign); 3475 return; 3476 } 3477 3478 llvm_unreachable("Unknown VFP cmp argument!"); 3479 } 3480 3481 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 3482 /// f32 and even f64 comparisons to integer ones. 3483 SDValue 3484 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 3485 SDValue Chain = Op.getOperand(0); 3486 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3487 SDValue LHS = Op.getOperand(2); 3488 SDValue RHS = Op.getOperand(3); 3489 SDValue Dest = Op.getOperand(4); 3490 SDLoc dl(Op); 3491 3492 bool LHSSeenZero = false; 3493 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 3494 bool RHSSeenZero = false; 3495 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 3496 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 3497 // If unsafe fp math optimization is enabled and there are no other uses of 3498 // the CMP operands, and the condition code is EQ or NE, we can optimize it 3499 // to an integer comparison. 
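// The trick below: when one side is known to be +/-0.0, compare magnitudes
// as integers by masking off the sign bits; e.g. (f32 x == 0.0f) becomes
// ((bitcast x) & 0x7fffffff) == 0.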
3500 if (CC == ISD::SETOEQ) 3501 CC = ISD::SETEQ; 3502 else if (CC == ISD::SETUNE) 3503 CC = ISD::SETNE; 3504 3505 SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32); 3506 SDValue ARMcc; 3507 if (LHS.getValueType() == MVT::f32) { 3508 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3509 bitcastf32Toi32(LHS, DAG), Mask); 3510 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3511 bitcastf32Toi32(RHS, DAG), Mask); 3512 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3513 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3514 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3515 Chain, Dest, ARMcc, CCR, Cmp); 3516 } 3517 3518 SDValue LHS1, LHS2; 3519 SDValue RHS1, RHS2; 3520 expandf64Toi32(LHS, DAG, LHS1, LHS2); 3521 expandf64Toi32(RHS, DAG, RHS1, RHS2); 3522 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 3523 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 3524 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3525 ARMcc = DAG.getConstant(CondCode, MVT::i32); 3526 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3527 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 3528 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 3529 } 3530 3531 return SDValue(); 3532 } 3533 3534 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 3535 SDValue Chain = Op.getOperand(0); 3536 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3537 SDValue LHS = Op.getOperand(2); 3538 SDValue RHS = Op.getOperand(3); 3539 SDValue Dest = Op.getOperand(4); 3540 SDLoc dl(Op); 3541 3542 if (LHS.getValueType() == MVT::i32) { 3543 SDValue ARMcc; 3544 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3545 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3546 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3547 Chain, Dest, ARMcc, CCR, Cmp); 3548 } 3549 3550 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 3551 3552 if (getTargetMachine().Options.UnsafeFPMath && 3553 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 3554 CC == ISD::SETNE || CC == ISD::SETUNE)) { 3555 SDValue Result = OptimizeVFPBrcond(Op, DAG); 3556 if (Result.getNode()) 3557 return Result; 3558 } 3559 3560 ARMCC::CondCodes CondCode, CondCode2; 3561 FPCCToARMCC(CC, CondCode, CondCode2); 3562 3563 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 3564 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3565 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3566 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3567 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 3568 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3569 if (CondCode2 != ARMCC::AL) { 3570 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 3571 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 3572 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3573 } 3574 return Res; 3575 } 3576 3577 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 3578 SDValue Chain = Op.getOperand(0); 3579 SDValue Table = Op.getOperand(1); 3580 SDValue Index = Op.getOperand(2); 3581 SDLoc dl(Op); 3582 3583 EVT PTy = getPointerTy(); 3584 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 3585 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 3586 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 3587 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 3588 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 3589 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy)); 3590 SDValue Addr = 
DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3591 if (Subtarget->isThumb2()) { 3592 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 3593 // which does another jump to the destination. This also makes it easier 3594 // to translate it to TBB / TBH later. 3595 // FIXME: This might not work if the function is extremely large. 3596 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 3597 Addr, Op.getOperand(2), JTI, UId); 3598 } 3599 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3600 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3601 MachinePointerInfo::getJumpTable(), 3602 false, false, false, 0); 3603 Chain = Addr.getValue(1); 3604 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3605 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3606 } else { 3607 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3608 MachinePointerInfo::getJumpTable(), 3609 false, false, false, 0); 3610 Chain = Addr.getValue(1); 3611 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3612 } 3613 } 3614 3615 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3616 EVT VT = Op.getValueType(); 3617 SDLoc dl(Op); 3618 3619 if (Op.getValueType().getVectorElementType() == MVT::i32) { 3620 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 3621 return Op; 3622 return DAG.UnrollVectorOp(Op.getNode()); 3623 } 3624 3625 assert(Op.getOperand(0).getValueType() == MVT::v4f32 && 3626 "Invalid type for custom lowering!"); 3627 if (VT != MVT::v4i16) 3628 return DAG.UnrollVectorOp(Op.getNode()); 3629 3630 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); 3631 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 3632 } 3633 3634 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3635 EVT VT = Op.getValueType(); 3636 if (VT.isVector()) 3637 return LowerVectorFP_TO_INT(Op, DAG); 3638 3639 SDLoc dl(Op); 3640 unsigned Opc; 3641 3642 switch (Op.getOpcode()) { 3643 default: llvm_unreachable("Invalid opcode!"); 3644 case ISD::FP_TO_SINT: 3645 Opc = ARMISD::FTOSI; 3646 break; 3647 case ISD::FP_TO_UINT: 3648 Opc = ARMISD::FTOUI; 3649 break; 3650 } 3651 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3652 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3653 } 3654 3655 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3656 EVT VT = Op.getValueType(); 3657 SDLoc dl(Op); 3658 3659 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 3660 if (VT.getVectorElementType() == MVT::f32) 3661 return Op; 3662 return DAG.UnrollVectorOp(Op.getNode()); 3663 } 3664 3665 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3666 "Invalid type for custom lowering!"); 3667 if (VT != MVT::v4f32) 3668 return DAG.UnrollVectorOp(Op.getNode()); 3669 3670 unsigned CastOpc; 3671 unsigned Opc; 3672 switch (Op.getOpcode()) { 3673 default: llvm_unreachable("Invalid opcode!"); 3674 case ISD::SINT_TO_FP: 3675 CastOpc = ISD::SIGN_EXTEND; 3676 Opc = ISD::SINT_TO_FP; 3677 break; 3678 case ISD::UINT_TO_FP: 3679 CastOpc = ISD::ZERO_EXTEND; 3680 Opc = ISD::UINT_TO_FP; 3681 break; 3682 } 3683 3684 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3685 return DAG.getNode(Opc, dl, VT, Op); 3686 } 3687 3688 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3689 EVT VT = Op.getValueType(); 3690 if (VT.isVector()) 3691 return LowerVectorINT_TO_FP(Op, DAG); 3692 3693 SDLoc dl(Op); 3694 unsigned Opc; 3695 3696 switch (Op.getOpcode()) { 3697 default: 
llvm_unreachable("Invalid opcode!"); 3698 case ISD::SINT_TO_FP: 3699 Opc = ARMISD::SITOF; 3700 break; 3701 case ISD::UINT_TO_FP: 3702 Opc = ARMISD::UITOF; 3703 break; 3704 } 3705 3706 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3707 return DAG.getNode(Opc, dl, VT, Op); 3708 } 3709 3710 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3711 // Implement fcopysign with a fabs and a conditional fneg. 3712 SDValue Tmp0 = Op.getOperand(0); 3713 SDValue Tmp1 = Op.getOperand(1); 3714 SDLoc dl(Op); 3715 EVT VT = Op.getValueType(); 3716 EVT SrcVT = Tmp1.getValueType(); 3717 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3718 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3719 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3720 3721 if (UseNEON) { 3722 // Use VBSL to copy the sign bit. 3723 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3724 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3725 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3726 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3727 if (VT == MVT::f64) 3728 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3729 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3730 DAG.getConstant(32, MVT::i32)); 3731 else /*if (VT == MVT::f32)*/ 3732 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3733 if (SrcVT == MVT::f32) { 3734 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3735 if (VT == MVT::f64) 3736 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3737 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3738 DAG.getConstant(32, MVT::i32)); 3739 } else if (VT == MVT::f32) 3740 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3741 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3742 DAG.getConstant(32, MVT::i32)); 3743 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3744 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3745 3746 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3747 MVT::i32); 3748 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3749 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3750 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3751 3752 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3753 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3754 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3755 if (VT == MVT::f32) { 3756 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3757 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3758 DAG.getConstant(0, MVT::i32)); 3759 } else { 3760 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3761 } 3762 3763 return Res; 3764 } 3765 3766 // Bitcast operand 1 to i32. 3767 if (SrcVT == MVT::f64) 3768 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3769 &Tmp1, 1).getValue(1); 3770 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3771 3772 // Or in the signbit with integer operations. 3773 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3774 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3775 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3776 if (VT == MVT::f32) { 3777 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3778 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3779 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3780 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3781 } 3782 3783 // f64: Or the high part with signbit and then combine two parts. 
3784 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
3785 &Tmp0, 1);
3786 SDValue Lo = Tmp0.getValue(0);
3787 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
3788 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
3789 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
3790 }
3791
3792 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
3793 MachineFunction &MF = DAG.getMachineFunction();
3794 MachineFrameInfo *MFI = MF.getFrameInfo();
3795 MFI->setReturnAddressIsTaken(true);
3796
3797 EVT VT = Op.getValueType();
3798 SDLoc dl(Op);
3799 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3800 if (Depth) {
3801 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
3802 SDValue Offset = DAG.getConstant(4, MVT::i32);
3803 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
3804 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
3805 MachinePointerInfo(), false, false, false, 0);
3806 }
3807
3808 // Return LR, which contains the return address. Mark it an implicit live-in.
3809 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
3810 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
3811 }
3812
3813 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
3814 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
3815 MFI->setFrameAddressIsTaken(true);
3816
3817 EVT VT = Op.getValueType();
3818 SDLoc dl(Op); // FIXME probably not meaningful
3819 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3820 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
3821 ? ARM::R7 : ARM::R11;
3822 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
3823 while (Depth--)
3824 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
3825 MachinePointerInfo(),
3826 false, false, false, 0);
3827 return FrameAddr;
3828 }
3829
3830 /// Custom-expand long vector extensions, where size(DestVec) > 2*size(SrcVec)
3831 /// and size(DestVec) > 128 bits.
3832 /// This is achieved by doing one extension from the SrcVec, splitting the
3833 /// result, extending these parts, and then concatenating them into the
3834 /// destination.
3835 static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) {
3836 SDValue Op = N->getOperand(0);
3837 EVT SrcVT = Op.getValueType();
3838 EVT DestVT = N->getValueType(0);
3839
3840 assert(DestVT.getSizeInBits() > 128 &&
3841 "Custom sext/zext expansion needs >128-bit vector.");
3842 // If this is a normal length extension, use the default expansion.
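// Illustrative example: v8i8 -> v8i32 (a 4x extension) is handled here by
// first extending v8i8 to v8i16, splitting into two v4i16 halves, extending
// each half to v4i32, and concatenating the results into the v8i32.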
3843 if (SrcVT.getSizeInBits()*4 != DestVT.getSizeInBits() && 3844 SrcVT.getSizeInBits()*8 != DestVT.getSizeInBits()) 3845 return SDValue(); 3846 3847 SDLoc dl(N); 3848 unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits(); 3849 unsigned DestEltSize = DestVT.getVectorElementType().getSizeInBits(); 3850 unsigned NumElts = SrcVT.getVectorNumElements(); 3851 LLVMContext &Ctx = *DAG.getContext(); 3852 SDValue Mid, SplitLo, SplitHi, ExtLo, ExtHi; 3853 3854 EVT MidVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2), 3855 NumElts); 3856 EVT SplitVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2), 3857 NumElts/2); 3858 EVT ExtVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, DestEltSize), 3859 NumElts/2); 3860 3861 Mid = DAG.getNode(N->getOpcode(), dl, MidVT, Op); 3862 SplitLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid, 3863 DAG.getIntPtrConstant(0)); 3864 SplitHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid, 3865 DAG.getIntPtrConstant(NumElts/2)); 3866 ExtLo = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitLo); 3867 ExtHi = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitHi); 3868 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, ExtLo, ExtHi); 3869 } 3870 3871 /// ExpandBITCAST - If the target supports VFP, this function is called to 3872 /// expand a bit convert where either the source or destination type is i64 to 3873 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3874 /// operand type is illegal (e.g., v2f32 for a target that doesn't support 3875 /// vectors), since the legalizer won't know what to do with that. 3876 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3877 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3878 SDLoc dl(N); 3879 SDValue Op = N->getOperand(0); 3880 3881 // This function is only supposed to be called for i64 types, either as the 3882 // source or destination of the bit convert. 3883 EVT SrcVT = Op.getValueType(); 3884 EVT DstVT = N->getValueType(0); 3885 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3886 "ExpandBITCAST called for non-i64 type"); 3887 3888 // Turn i64->f64 into VMOVDRR. 3889 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3890 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3891 DAG.getConstant(0, MVT::i32)); 3892 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3893 DAG.getConstant(1, MVT::i32)); 3894 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3895 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3896 } 3897 3898 // Turn f64->i64 into VMOVRRD. 3899 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3900 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3901 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3902 // Merge the pieces into a single i64 value. 3903 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3904 } 3905 3906 return SDValue(); 3907 } 3908 3909 /// getZeroVector - Returns a vector of specified type with all zero elements. 3910 /// Zero vectors are used to represent vector negation and in those cases 3911 /// will be implemented with the NEON VNEG instruction. However, VNEG does 3912 /// not support i64 elements, so sometimes the zero vectors will need to be 3913 /// explicitly constructed. Regardless, use a canonical VMOV to create the 3914 /// zero vector. 3915 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) { 3916 assert(VT.isVector() && "Expected a vector type"); 3917 // The canonical modified immediate encoding of a zero vector is....0! 
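// (Here EncodedVal = 0 decodes as Op=0, Cmode=0000, Imm=0x00 -- the 32-bit
// "0x000000nn" VMOV form with nn = 0; see isNEONModifiedImm below.)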
3918 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
3919 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
3920 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
3921 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
3922 }
3923
3924 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
3925 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
3926 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
3927 SelectionDAG &DAG) const {
3928 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
3929 EVT VT = Op.getValueType();
3930 unsigned VTBits = VT.getSizeInBits();
3931 SDLoc dl(Op);
3932 SDValue ShOpLo = Op.getOperand(0);
3933 SDValue ShOpHi = Op.getOperand(1);
3934 SDValue ShAmt = Op.getOperand(2);
3935 SDValue ARMcc;
3936 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
3937
3938 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
3939
3940 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
3941 DAG.getConstant(VTBits, MVT::i32), ShAmt);
3942 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
3943 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
3944 DAG.getConstant(VTBits, MVT::i32));
3945 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
3946 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3947 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
3948
3949 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3950 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
3951 ARMcc, DAG, dl);
3952 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
3953 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
3954 CCR, Cmp);
3955
3956 SDValue Ops[2] = { Lo, Hi };
3957 return DAG.getMergeValues(Ops, 2, dl);
3958 }
3959
3960 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
3961 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
3962 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
3963 SelectionDAG &DAG) const {
3964 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
3965 EVT VT = Op.getValueType();
3966 unsigned VTBits = VT.getSizeInBits();
3967 SDLoc dl(Op);
3968 SDValue ShOpLo = Op.getOperand(0);
3969 SDValue ShOpHi = Op.getOperand(1);
3970 SDValue ShAmt = Op.getOperand(2);
3971 SDValue ARMcc;
3972
3973 assert(Op.getOpcode() == ISD::SHL_PARTS);
3974 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
3975 DAG.getConstant(VTBits, MVT::i32), ShAmt);
3976 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
3977 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
3978 DAG.getConstant(VTBits, MVT::i32));
3979 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
3980 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
3981
3982 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
3983 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3984 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
3985 ARMcc, DAG, dl);
3986 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
3987 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
3988 CCR, Cmp);
3989
3990 SDValue Ops[2] = { Lo, Hi };
3991 return DAG.getMergeValues(Ops, 2, dl);
3992 }
3993
3994 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
3995 SelectionDAG &DAG) const {
3996 // The rounding mode is in bits 23:22 of the FPSCR.
3997 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0.
3998 // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3),
3999 // written so that the shift and AND get folded into a bitfield extract.
4000 SDLoc dl(Op);
4001 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
4002 DAG.getConstant(Intrinsic::arm_get_fpscr,
4003 MVT::i32));
4004 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
4005 DAG.getConstant(1U << 22, MVT::i32));
4006 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4007 DAG.getConstant(22, MVT::i32));
4008 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4009 DAG.getConstant(3, MVT::i32));
4010 }
4011
4012 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
4013 const ARMSubtarget *ST) {
4014 EVT VT = N->getValueType(0);
4015 SDLoc dl(N);
4016
4017 if (!ST->hasV6T2Ops())
4018 return SDValue();
4019
4020 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
4021 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
4022 }
4023
4024 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
4025 /// for each 16-bit element from the operand, repeated. The basic idea is to
4026 /// leverage vcnt to get the 8-bit counts, then gather and add the results.
4027 ///
4028 /// Trace for v4i16:
4029 /// input = [v0 v1 v2 v3 ] (vi 16-bit element)
4030 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
4031 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
4032 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
4033 /// [b0 b1 b2 b3 b4 b5 b6 b7]
4034 /// +[b1 b0 b3 b2 b5 b4 b7 b6]
4035 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
4036 /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8 bits)
4037 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
4038 EVT VT = N->getValueType(0);
4039 SDLoc DL(N);
4040
4041 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
4042 SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
4043 SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
4044 SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
4045 SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
4046 return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
4047 }
4048
4049 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
4050 /// bit-count for each 16-bit element from the operand. We need slightly
4051 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
4052 /// 64/128-bit registers.
4053 /// 4054 /// Trace for v4i16: 4055 /// input = [v0 v1 v2 v3 ] (vi 16-bit element) 4056 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi) 4057 /// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ] 4058 /// v4i16:Extracted = [k0 k1 k2 k3 ] 4059 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { 4060 EVT VT = N->getValueType(0); 4061 SDLoc DL(N); 4062 4063 SDValue BitCounts = getCTPOP16BitCounts(N, DAG); 4064 if (VT.is64BitVector()) { 4065 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts); 4066 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended, 4067 DAG.getIntPtrConstant(0)); 4068 } else { 4069 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, 4070 BitCounts, DAG.getIntPtrConstant(0)); 4071 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted); 4072 } 4073 } 4074 4075 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the 4076 /// bit-count for each 32-bit element from the operand. The idea here is 4077 /// to split the vector into 16-bit elements, leverage the 16-bit count 4078 /// routine, and then combine the results. 4079 /// 4080 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged): 4081 /// input = [v0 v1 ] (vi: 32-bit elements) 4082 /// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1]) 4083 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi) 4084 /// vrev: N0 = [k1 k0 k3 k2 ] 4085 /// [k0 k1 k2 k3 ] 4086 /// N1 =+[k1 k0 k3 k2 ] 4087 /// [k0 k2 k1 k3 ] 4088 /// N2 =+[k1 k3 k0 k2 ] 4089 /// [k0 k2 k1 k3 ] 4090 /// Extended =+[k1 k3 k0 k2 ] 4091 /// [k0 k2 ] 4092 /// Extracted=+[k1 k3 ] 4093 /// 4094 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) { 4095 EVT VT = N->getValueType(0); 4096 SDLoc DL(N); 4097 4098 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16; 4099 4100 SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0)); 4101 SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG); 4102 SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16); 4103 SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0); 4104 SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1); 4105 4106 if (VT.is64BitVector()) { 4107 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2); 4108 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended, 4109 DAG.getIntPtrConstant(0)); 4110 } else { 4111 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2, 4112 DAG.getIntPtrConstant(0)); 4113 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted); 4114 } 4115 } 4116 4117 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, 4118 const ARMSubtarget *ST) { 4119 EVT VT = N->getValueType(0); 4120 4121 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); 4122 assert((VT == MVT::v2i32 || VT == MVT::v4i32 || 4123 VT == MVT::v4i16 || VT == MVT::v8i16) && 4124 "Unexpected type for custom ctpop lowering"); 4125 4126 if (VT.getVectorElementType() == MVT::i32) 4127 return lowerCTPOP32BitElements(N, DAG); 4128 else 4129 return lowerCTPOP16BitElements(N, DAG); 4130 } 4131 4132 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 4133 const ARMSubtarget *ST) { 4134 EVT VT = N->getValueType(0); 4135 SDLoc dl(N); 4136 4137 if (!VT.isVector()) 4138 return SDValue(); 4139 4140 // Lower vector shifts on NEON to use VSHL. 
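// (Illustrative: a v4i32 SRA by a splat of 3 becomes an arm_neon_vshifts
// call whose shift-amount operand is the splat of -3, built via the
// "0 - amount" subtraction below.)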
4141 assert(ST->hasNEON() && "unexpected vector shift");
4142
4143 // Left shifts translate directly to the vshiftu intrinsic.
4144 if (N->getOpcode() == ISD::SHL)
4145 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
4146 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
4147 N->getOperand(0), N->getOperand(1));
4148
4149 assert((N->getOpcode() == ISD::SRA ||
4150 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
4151
4152 // NEON uses the same intrinsics for both left and right shifts. For
4153 // right shifts, the shift amounts are negative, so negate the vector of
4154 // shift amounts.
4155 EVT ShiftVT = N->getOperand(1).getValueType();
4156 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
4157 getZeroVector(ShiftVT, DAG, dl),
4158 N->getOperand(1));
4159 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
4160 Intrinsic::arm_neon_vshifts :
4161 Intrinsic::arm_neon_vshiftu);
4162 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
4163 DAG.getConstant(vshiftInt, MVT::i32),
4164 N->getOperand(0), NegatedCount);
4165 }
4166
4167 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
4168 const ARMSubtarget *ST) {
4169 EVT VT = N->getValueType(0);
4170 SDLoc dl(N);
4171
4172 // We can get here for a node like i32 = ISD::SHL i32, i64
4173 if (VT != MVT::i64)
4174 return SDValue();
4175
4176 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
4177 "Unknown shift to lower!");
4178
4179 // We only lower SRA and SRL by 1 here; all others use the generic lowering.
4180 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
4181 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
4182 return SDValue();
4183
4184 // If we are in Thumb1 mode, we don't have RRX.
4185 if (ST->isThumb1Only()) return SDValue();
4186
4187 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
4188 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
4189 DAG.getConstant(0, MVT::i32));
4190 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
4191 DAG.getConstant(1, MVT::i32));
4192
4193 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
4194 // captures the result into a carry flag.
4195 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
4196 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1);
4197
4198 // The low part is an ARMISD::RRX operand, which shifts the carry in.
4199 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
4200
4201 // Merge the pieces into a single i64 value.
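// (Net effect for a 64-bit x >> 1, illustrative: SRL_FLAG shifts Hi right
// by one and moves its old bit 0 into the carry; RRX then rotates that
// carry into bit 31 of Lo.)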
4202 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 4203 } 4204 4205 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 4206 SDValue TmpOp0, TmpOp1; 4207 bool Invert = false; 4208 bool Swap = false; 4209 unsigned Opc = 0; 4210 4211 SDValue Op0 = Op.getOperand(0); 4212 SDValue Op1 = Op.getOperand(1); 4213 SDValue CC = Op.getOperand(2); 4214 EVT VT = Op.getValueType(); 4215 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 4216 SDLoc dl(Op); 4217 4218 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 4219 switch (SetCCOpcode) { 4220 default: llvm_unreachable("Illegal FP comparison"); 4221 case ISD::SETUNE: 4222 case ISD::SETNE: Invert = true; // Fallthrough 4223 case ISD::SETOEQ: 4224 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 4225 case ISD::SETOLT: 4226 case ISD::SETLT: Swap = true; // Fallthrough 4227 case ISD::SETOGT: 4228 case ISD::SETGT: Opc = ARMISD::VCGT; break; 4229 case ISD::SETOLE: 4230 case ISD::SETLE: Swap = true; // Fallthrough 4231 case ISD::SETOGE: 4232 case ISD::SETGE: Opc = ARMISD::VCGE; break; 4233 case ISD::SETUGE: Swap = true; // Fallthrough 4234 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 4235 case ISD::SETUGT: Swap = true; // Fallthrough 4236 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 4237 case ISD::SETUEQ: Invert = true; // Fallthrough 4238 case ISD::SETONE: 4239 // Expand this to (OLT | OGT). 4240 TmpOp0 = Op0; 4241 TmpOp1 = Op1; 4242 Opc = ISD::OR; 4243 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 4244 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 4245 break; 4246 case ISD::SETUO: Invert = true; // Fallthrough 4247 case ISD::SETO: 4248 // Expand this to (OLT | OGE). 4249 TmpOp0 = Op0; 4250 TmpOp1 = Op1; 4251 Opc = ISD::OR; 4252 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 4253 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 4254 break; 4255 } 4256 } else { 4257 // Integer comparisons. 4258 switch (SetCCOpcode) { 4259 default: llvm_unreachable("Illegal integer comparison"); 4260 case ISD::SETNE: Invert = true; 4261 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 4262 case ISD::SETLT: Swap = true; 4263 case ISD::SETGT: Opc = ARMISD::VCGT; break; 4264 case ISD::SETLE: Swap = true; 4265 case ISD::SETGE: Opc = ARMISD::VCGE; break; 4266 case ISD::SETULT: Swap = true; 4267 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 4268 case ISD::SETULE: Swap = true; 4269 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 4270 } 4271 4272 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 4273 if (Opc == ARMISD::VCEQ) { 4274 4275 SDValue AndOp; 4276 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 4277 AndOp = Op0; 4278 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 4279 AndOp = Op1; 4280 4281 // Ignore bitconvert. 4282 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 4283 AndOp = AndOp.getOperand(0); 4284 4285 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 4286 Opc = ARMISD::VTST; 4287 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 4288 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 4289 Invert = !Invert; 4290 } 4291 } 4292 } 4293 4294 if (Swap) 4295 std::swap(Op0, Op1); 4296 4297 // If one of the operands is a constant vector zero, attempt to fold the 4298 // comparison to a specialized compare-against-zero form. 
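// For example, VCGE(X, 0) becomes VCGEZ(X), and VCGE(0, X) becomes VCLEZ(X),
// matching NEON's dedicated compare-against-zero instructions.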
4299 SDValue SingleOp;
4300 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
4301 SingleOp = Op0;
4302 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
4303 if (Opc == ARMISD::VCGE)
4304 Opc = ARMISD::VCLEZ;
4305 else if (Opc == ARMISD::VCGT)
4306 Opc = ARMISD::VCLTZ;
4307 SingleOp = Op1;
4308 }
4309
4310 SDValue Result;
4311 if (SingleOp.getNode()) {
4312 switch (Opc) {
4313 case ARMISD::VCEQ:
4314 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
4315 case ARMISD::VCGE:
4316 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
4317 case ARMISD::VCLEZ:
4318 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
4319 case ARMISD::VCGT:
4320 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
4321 case ARMISD::VCLTZ:
4322 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
4323 default:
4324 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
4325 }
4326 } else {
4327 Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
4328 }
4329
4330 if (Invert)
4331 Result = DAG.getNOT(dl, Result, VT);
4332
4333 return Result;
4334 }
4335
4336 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
4337 /// valid vector constant for a NEON instruction with a "modified immediate"
4338 /// operand (e.g., VMOV). If so, return the encoded value.
4339 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
4340 unsigned SplatBitSize, SelectionDAG &DAG,
4341 EVT &VT, bool is128Bits, NEONModImmType type) {
4342 unsigned OpCmode, Imm;
4343
4344 // SplatBitSize is set to the smallest size that splats the vector, so a
4345 // zero vector will always have SplatBitSize == 8. However, NEON modified
4346 // immediate instructions other than VMOV do not support the 8-bit encoding
4347 // of a zero vector, and the default encoding of zero is supposed to be the
4348 // 32-bit version.
4349 if (SplatBits == 0)
4350 SplatBitSize = 32;
4351
4352 switch (SplatBitSize) {
4353 case 8:
4354 if (type != VMOVModImm)
4355 return SDValue();
4356 // Any 1-byte value is OK. Op=0, Cmode=1110.
4357 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
4358 OpCmode = 0xe;
4359 Imm = SplatBits;
4360 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
4361 break;
4362
4363 case 16:
4364 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
4365 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
4366 if ((SplatBits & ~0xff) == 0) {
4367 // Value = 0x00nn: Op=x, Cmode=100x.
4368 OpCmode = 0x8;
4369 Imm = SplatBits;
4370 break;
4371 }
4372 if ((SplatBits & ~0xff00) == 0) {
4373 // Value = 0xnn00: Op=x, Cmode=101x.
4374 OpCmode = 0xa;
4375 Imm = SplatBits >> 8;
4376 break;
4377 }
4378 return SDValue();
4379
4380 case 32:
4381 // NEON's 32-bit VMOV supports splat values where:
4382 // * only one byte is nonzero, or
4383 // * the least significant byte is 0xff and the second byte is nonzero, or
4384 // * the least significant 2 bytes are 0xff and the third is nonzero.
4385 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
4386 if ((SplatBits & ~0xff) == 0) {
4387 // Value = 0x000000nn: Op=x, Cmode=000x.
4388 OpCmode = 0;
4389 Imm = SplatBits;
4390 break;
4391 }
4392 if ((SplatBits & ~0xff00) == 0) {
4393 // Value = 0x0000nn00: Op=x, Cmode=001x.
4394 OpCmode = 0x2;
4395 Imm = SplatBits >> 8;
4396 break;
4397 }
4398 if ((SplatBits & ~0xff0000) == 0) {
4399 // Value = 0x00nn0000: Op=x, Cmode=010x.
4400 OpCmode = 0x4;
4401 Imm = SplatBits >> 16;
4402 break;
4403 }
4404 if ((SplatBits & ~0xff000000) == 0) {
4405 // Value = 0xnn000000: Op=x, Cmode=011x.
4406 OpCmode = 0x6; 4407 Imm = SplatBits >> 24; 4408 break; 4409 } 4410 4411 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 4412 if (type == OtherModImm) return SDValue(); 4413 4414 if ((SplatBits & ~0xffff) == 0 && 4415 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 4416 // Value = 0x0000nnff: Op=x, Cmode=1100. 4417 OpCmode = 0xc; 4418 Imm = SplatBits >> 8; 4419 SplatBits |= 0xff; 4420 break; 4421 } 4422 4423 if ((SplatBits & ~0xffffff) == 0 && 4424 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 4425 // Value = 0x00nnffff: Op=x, Cmode=1101. 4426 OpCmode = 0xd; 4427 Imm = SplatBits >> 16; 4428 SplatBits |= 0xffff; 4429 break; 4430 } 4431 4432 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 4433 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 4434 // VMOV.I32. A (very) minor optimization would be to replicate the value 4435 // and fall through here to test for a valid 64-bit splat. But, then the 4436 // caller would also need to check and handle the change in size. 4437 return SDValue(); 4438 4439 case 64: { 4440 if (type != VMOVModImm) 4441 return SDValue(); 4442 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 4443 uint64_t BitMask = 0xff; 4444 uint64_t Val = 0; 4445 unsigned ImmMask = 1; 4446 Imm = 0; 4447 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 4448 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 4449 Val |= BitMask; 4450 Imm |= ImmMask; 4451 } else if ((SplatBits & BitMask) != 0) { 4452 return SDValue(); 4453 } 4454 BitMask <<= 8; 4455 ImmMask <<= 1; 4456 } 4457 // Op=1, Cmode=1110. 4458 OpCmode = 0x1e; 4459 SplatBits = Val; 4460 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 4461 break; 4462 } 4463 4464 default: 4465 llvm_unreachable("unexpected size for isNEONModifiedImm"); 4466 } 4467 4468 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 4469 return DAG.getTargetConstant(EncodedVal, MVT::i32); 4470 } 4471 4472 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 4473 const ARMSubtarget *ST) const { 4474 if (!ST->hasVFP3()) 4475 return SDValue(); 4476 4477 bool IsDouble = Op.getValueType() == MVT::f64; 4478 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 4479 4480 // Try splatting with a VMOV.f32... 4481 APFloat FPVal = CFP->getValueAPF(); 4482 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal); 4483 4484 if (ImmVal != -1) { 4485 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) { 4486 // We have code in place to select a valid ConstantFP already, no need to 4487 // do any mangling. 4488 return Op; 4489 } 4490 4491 // It's a float and we are trying to use NEON operations where 4492 // possible. Lower it to a splat followed by an extract. 4493 SDLoc DL(Op); 4494 SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32); 4495 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 4496 NewVal); 4497 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 4498 DAG.getConstant(0, MVT::i32)); 4499 } 4500 4501 // The rest of our options are NEON only, make sure that's allowed before 4502 // proceeding.. 4503 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP())) 4504 return SDValue(); 4505 4506 EVT VMovVT; 4507 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue(); 4508 4509 // It wouldn't really be worth bothering for doubles except for one very 4510 // important value, which does happen to match: 0.0. So make sure we don't do 4511 // anything stupid. 
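// Only a double whose two 32-bit halves are identical (e.g. 0.0) can be
// rebuilt from a 32-bit element splat, so everything else bails out here.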
4512 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
4513 return SDValue();
4514
4515 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
4516 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
4517 false, VMOVModImm);
4518 if (NewVal != SDValue()) {
4519 SDLoc DL(Op);
4520 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
4521 NewVal);
4522 if (IsDouble)
4523 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
4524
4525 // It's a float: cast and extract a vector element.
4526 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
4527 VecConstant);
4528 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
4529 DAG.getConstant(0, MVT::i32));
4530 }
4531
4532 // Finally, try a VMVN.i32
4533 NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
4534 false, VMVNModImm);
4535 if (NewVal != SDValue()) {
4536 SDLoc DL(Op);
4537 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
4538
4539 if (IsDouble)
4540 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
4541
4542 // It's a float: cast and extract a vector element.
4543 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
4544 VecConstant);
4545 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
4546 DAG.getConstant(0, MVT::i32));
4547 }
4548
4549 return SDValue();
4550 }
4551
4552 // Check if a VEXT instruction can handle the shuffle mask when the
4553 // vector sources of the shuffle are the same.
4554 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
4555 unsigned NumElts = VT.getVectorNumElements();
4556
4557 // Assume that the first shuffle index is not UNDEF. Fail if it is.
4558 if (M[0] < 0)
4559 return false;
4560
4561 Imm = M[0];
4562
4563 // If this is a VEXT shuffle, the immediate value is the index of the first
4564 // element. The other shuffle indices must be the successive elements after
4565 // the first one.
4566 unsigned ExpectedElt = Imm;
4567 for (unsigned i = 1; i < NumElts; ++i) {
4568 // Increment the expected index. If it wraps around, just follow it
4569 // back to index zero and keep going.
4570 ++ExpectedElt;
4571 if (ExpectedElt == NumElts)
4572 ExpectedElt = 0;
4573
4574 if (M[i] < 0) continue; // ignore UNDEF indices
4575 if (ExpectedElt != static_cast<unsigned>(M[i]))
4576 return false;
4577 }
4578
4579 return true;
4580 }
4581
4582
4583 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
4584 bool &ReverseVEXT, unsigned &Imm) {
4585 unsigned NumElts = VT.getVectorNumElements();
4586 ReverseVEXT = false;
4587
4588 // Assume that the first shuffle index is not UNDEF. Fail if it is.
4589 if (M[0] < 0)
4590 return false;
4591
4592 Imm = M[0];
4593
4594 // If this is a VEXT shuffle, the immediate value is the index of the first
4595 // element. The other shuffle indices must be the successive elements after
4596 // the first one.
4597 unsigned ExpectedElt = Imm;
4598 for (unsigned i = 1; i < NumElts; ++i) {
4599 // Increment the expected index. If it wraps around, it may still be
4600 // a VEXT but the source vectors must be swapped.
4601 ExpectedElt += 1;
4602 if (ExpectedElt == NumElts * 2) {
4603 ExpectedElt = 0;
4604 ReverseVEXT = true;
4605 }
4606
4607 if (M[i] < 0) continue; // ignore UNDEF indices
4608 if (ExpectedElt != static_cast<unsigned>(M[i]))
4609 return false;
4610 }
4611
4612 // Adjust the index value if the source operands will be swapped.
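// For example, the v4i16 mask <6, 7, 0, 1> wraps around the concatenated
// pair, so ReverseVEXT is set and the immediate is rebased to
// Imm = 6 - NumElts = 2, i.e. VEXT(V2, V1, #2).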
4613 if (ReverseVEXT) 4614 Imm -= NumElts; 4615 4616 return true; 4617 } 4618 4619 /// isVREVMask - Check if a vector shuffle corresponds to a VREV 4620 /// instruction with the specified blocksize. (The order of the elements 4621 /// within each block of the vector is reversed.) 4622 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 4623 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 4624 "Only possible block sizes for VREV are: 16, 32, 64"); 4625 4626 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4627 if (EltSz == 64) 4628 return false; 4629 4630 unsigned NumElts = VT.getVectorNumElements(); 4631 unsigned BlockElts = M[0] + 1; 4632 // If the first shuffle index is UNDEF, be optimistic. 4633 if (M[0] < 0) 4634 BlockElts = BlockSize / EltSz; 4635 4636 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 4637 return false; 4638 4639 for (unsigned i = 0; i < NumElts; ++i) { 4640 if (M[i] < 0) continue; // ignore UNDEF indices 4641 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 4642 return false; 4643 } 4644 4645 return true; 4646 } 4647 4648 static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 4649 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 4650 // range, then 0 is placed into the resulting vector. So pretty much any mask 4651 // of 8 elements can work here. 4652 return VT == MVT::v8i8 && M.size() == 8; 4653 } 4654 4655 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4656 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4657 if (EltSz == 64) 4658 return false; 4659 4660 unsigned NumElts = VT.getVectorNumElements(); 4661 WhichResult = (M[0] == 0 ? 0 : 1); 4662 for (unsigned i = 0; i < NumElts; i += 2) { 4663 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4664 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 4665 return false; 4666 } 4667 return true; 4668 } 4669 4670 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 4671 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4672 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 4673 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4674 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4675 if (EltSz == 64) 4676 return false; 4677 4678 unsigned NumElts = VT.getVectorNumElements(); 4679 WhichResult = (M[0] == 0 ? 0 : 1); 4680 for (unsigned i = 0; i < NumElts; i += 2) { 4681 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4682 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 4683 return false; 4684 } 4685 return true; 4686 } 4687 4688 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4689 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4690 if (EltSz == 64) 4691 return false; 4692 4693 unsigned NumElts = VT.getVectorNumElements(); 4694 WhichResult = (M[0] == 0 ? 0 : 1); 4695 for (unsigned i = 0; i != NumElts; ++i) { 4696 if (M[i] < 0) continue; // ignore UNDEF indices 4697 if ((unsigned) M[i] != 2 * i + WhichResult) 4698 return false; 4699 } 4700 4701 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4702 if (VT.is64BitVector() && EltSz == 32) 4703 return false; 4704 4705 return true; 4706 } 4707 4708 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 4709 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 
4710 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
4711 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
4712 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4713 if (EltSz == 64)
4714 return false;
4715
4716 unsigned Half = VT.getVectorNumElements() / 2;
4717 WhichResult = (M[0] == 0 ? 0 : 1);
4718 for (unsigned j = 0; j != 2; ++j) {
4719 unsigned Idx = WhichResult;
4720 for (unsigned i = 0; i != Half; ++i) {
4721 int MIdx = M[i + j * Half];
4722 if (MIdx >= 0 && (unsigned) MIdx != Idx)
4723 return false;
4724 Idx += 2;
4725 }
4726 }
4727
4728 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
4729 if (VT.is64BitVector() && EltSz == 32)
4730 return false;
4731
4732 return true;
4733 }
4734
4735 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
4736 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4737 if (EltSz == 64)
4738 return false;
4739
4740 unsigned NumElts = VT.getVectorNumElements();
4741 WhichResult = (M[0] == 0 ? 0 : 1);
4742 unsigned Idx = WhichResult * NumElts / 2;
4743 for (unsigned i = 0; i != NumElts; i += 2) {
4744 if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
4745 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
4746 return false;
4747 Idx += 1;
4748 }
4749
4750 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
4751 if (VT.is64BitVector() && EltSz == 32)
4752 return false;
4753
4754 return true;
4755 }
4756
4757 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
4758 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
4759 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
4760 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
4761 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4762 if (EltSz == 64)
4763 return false;
4764
4765 unsigned NumElts = VT.getVectorNumElements();
4766 WhichResult = (M[0] == 0 ? 0 : 1);
4767 unsigned Idx = WhichResult * NumElts / 2;
4768 for (unsigned i = 0; i != NumElts; i += 2) {
4769 if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
4770 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
4771 return false;
4772 Idx += 1;
4773 }
4774
4775 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
4776 if (VT.is64BitVector() && EltSz == 32)
4777 return false;
4778
4779 return true;
4780 }
4781
4782 /// \return true if this is a reverse operation on a vector.
4783 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
4784 unsigned NumElts = VT.getVectorNumElements();
4785 // Make sure the mask has the right size.
4786 if (NumElts != M.size())
4787 return false;
4788
4789 // Look for <15, ..., 3, -1, 1, 0>.
4790 for (unsigned i = 0; i != NumElts; ++i)
4791 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
4792 return false;
4793
4794 return true;
4795 }
4796
4797 // If N is an integer constant that can be moved into a register in one
4798 // instruction, return an SDValue of such a constant (will become a MOV
4799 // instruction). Otherwise return null.
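// On Thumb1 a value (or its bitwise NOT) that fits in 8 bits qualifies;
// otherwise any so_imm-encodable value or its complement does (materialized
// with MOV or MVN respectively).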
4800 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 4801 const ARMSubtarget *ST, SDLoc dl) { 4802 uint64_t Val; 4803 if (!isa<ConstantSDNode>(N)) 4804 return SDValue(); 4805 Val = cast<ConstantSDNode>(N)->getZExtValue(); 4806 4807 if (ST->isThumb1Only()) { 4808 if (Val <= 255 || ~Val <= 255) 4809 return DAG.getConstant(Val, MVT::i32); 4810 } else { 4811 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 4812 return DAG.getConstant(Val, MVT::i32); 4813 } 4814 return SDValue(); 4815 } 4816 4817 // If this is a case we can't handle, return null and let the default 4818 // expansion code take care of it. 4819 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 4820 const ARMSubtarget *ST) const { 4821 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 4822 SDLoc dl(Op); 4823 EVT VT = Op.getValueType(); 4824 4825 APInt SplatBits, SplatUndef; 4826 unsigned SplatBitSize; 4827 bool HasAnyUndefs; 4828 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4829 if (SplatBitSize <= 64) { 4830 // Check if an immediate VMOV works. 4831 EVT VmovVT; 4832 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 4833 SplatUndef.getZExtValue(), SplatBitSize, 4834 DAG, VmovVT, VT.is128BitVector(), 4835 VMOVModImm); 4836 if (Val.getNode()) { 4837 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 4838 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4839 } 4840 4841 // Try an immediate VMVN. 4842 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 4843 Val = isNEONModifiedImm(NegatedImm, 4844 SplatUndef.getZExtValue(), SplatBitSize, 4845 DAG, VmovVT, VT.is128BitVector(), 4846 VMVNModImm); 4847 if (Val.getNode()) { 4848 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 4849 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4850 } 4851 4852 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 4853 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 4854 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 4855 if (ImmVal != -1) { 4856 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 4857 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 4858 } 4859 } 4860 } 4861 } 4862 4863 // Scan through the operands to see if only one value is used. 4864 // 4865 // As an optimisation, even if more than one value is used it may be more 4866 // profitable to splat with one value then change some lanes. 4867 // 4868 // Heuristically we decide to do this if the vector has a "dominant" value, 4869 // defined as splatted to more than half of the lanes. 4870 unsigned NumElts = VT.getVectorNumElements(); 4871 bool isOnlyLowElement = true; 4872 bool usesOnlyOneValue = true; 4873 bool hasDominantValue = false; 4874 bool isConstant = true; 4875 4876 // Map of the number of times a particular SDValue appears in the 4877 // element list. 4878 DenseMap<SDValue, unsigned> ValueCounts; 4879 SDValue Value; 4880 for (unsigned i = 0; i < NumElts; ++i) { 4881 SDValue V = Op.getOperand(i); 4882 if (V.getOpcode() == ISD::UNDEF) 4883 continue; 4884 if (i > 0) 4885 isOnlyLowElement = false; 4886 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4887 isConstant = false; 4888 4889 ValueCounts.insert(std::make_pair(V, 0)); 4890 unsigned &Count = ValueCounts[V]; 4891 4892 // Is this value dominant? 
(takes up more than half of the lanes) 4893 if (++Count > (NumElts / 2)) { 4894 hasDominantValue = true; 4895 Value = V; 4896 } 4897 } 4898 if (ValueCounts.size() != 1) 4899 usesOnlyOneValue = false; 4900 if (!Value.getNode() && ValueCounts.size() > 0) 4901 Value = ValueCounts.begin()->first; 4902 4903 if (ValueCounts.size() == 0) 4904 return DAG.getUNDEF(VT); 4905 4906 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR. 4907 // Keep going if we are hitting this case. 4908 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode())) 4909 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 4910 4911 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4912 4913 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 4914 // i32 and try again. 4915 if (hasDominantValue && EltSize <= 32) { 4916 if (!isConstant) { 4917 SDValue N; 4918 4919 // If we are VDUPing a value that comes directly from a vector, that will 4920 // cause an unnecessary move to and from a GPR, where instead we could 4921 // just use VDUPLANE. We can only do this if the lane being extracted 4922 // is at a constant index, as the VDUP from lane instructions only have 4923 // constant-index forms. 4924 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 4925 isa<ConstantSDNode>(Value->getOperand(1))) { 4926 // We need to create a new undef vector to use for the VDUPLANE if the 4927 // size of the vector from which we get the value is different than the 4928 // size of the vector that we need to create. We will insert the element 4929 // such that the register coalescer will remove unnecessary copies. 4930 if (VT != Value->getOperand(0).getValueType()) { 4931 ConstantSDNode *constIndex; 4932 constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)); 4933 assert(constIndex && "The index is not a constant!"); 4934 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 4935 VT.getVectorNumElements(); 4936 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4937 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 4938 Value, DAG.getConstant(index, MVT::i32)), 4939 DAG.getConstant(index, MVT::i32)); 4940 } else 4941 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4942 Value->getOperand(0), Value->getOperand(1)); 4943 } else 4944 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 4945 4946 if (!usesOnlyOneValue) { 4947 // The dominant value was splatted as 'N', but we now have to insert 4948 // all differing elements. 
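// For example, <a, a, a, b> becomes VDUP(a) followed by a single
// INSERT_VECTOR_ELT of b into lane 3.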
4949 for (unsigned I = 0; I < NumElts; ++I) { 4950 if (Op.getOperand(I) == Value) 4951 continue; 4952 SmallVector<SDValue, 3> Ops; 4953 Ops.push_back(N); 4954 Ops.push_back(Op.getOperand(I)); 4955 Ops.push_back(DAG.getConstant(I, MVT::i32)); 4956 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3); 4957 } 4958 } 4959 return N; 4960 } 4961 if (VT.getVectorElementType().isFloatingPoint()) { 4962 SmallVector<SDValue, 8> Ops; 4963 for (unsigned i = 0; i < NumElts; ++i) 4964 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4965 Op.getOperand(i))); 4966 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4967 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4968 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4969 if (Val.getNode()) 4970 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4971 } 4972 if (usesOnlyOneValue) { 4973 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4974 if (isConstant && Val.getNode()) 4975 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4976 } 4977 } 4978 4979 // If all elements are constants and the case above didn't get hit, fall back 4980 // to the default expansion, which will generate a load from the constant 4981 // pool. 4982 if (isConstant) 4983 return SDValue(); 4984 4985 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4986 if (NumElts >= 4) { 4987 SDValue shuffle = ReconstructShuffle(Op, DAG); 4988 if (shuffle != SDValue()) 4989 return shuffle; 4990 } 4991 4992 // Vectors with 32- or 64-bit elements can be built by directly assigning 4993 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4994 // will be legalized. 4995 if (EltSize >= 32) { 4996 // Do the expansion with floating-point types, since that is what the VFP 4997 // registers are defined to use, and since i64 is not legal. 4998 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4999 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 5000 SmallVector<SDValue, 8> Ops; 5001 for (unsigned i = 0; i < NumElts; ++i) 5002 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 5003 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 5004 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 5005 } 5006 5007 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 5008 // know the default expansion would otherwise fall back on something even 5009 // worse. For a vector with one or two non-undef values, that's 5010 // scalar_to_vector for the elements followed by a shuffle (provided the 5011 // shuffle is valid for the target) and materialization element by element 5012 // on the stack followed by a load for everything else. 5013 if (!isConstant && !usesOnlyOneValue) { 5014 SDValue Vec = DAG.getUNDEF(VT); 5015 for (unsigned i = 0 ; i < NumElts; ++i) { 5016 SDValue V = Op.getOperand(i); 5017 if (V.getOpcode() == ISD::UNDEF) 5018 continue; 5019 SDValue LaneIdx = DAG.getConstant(i, MVT::i32); 5020 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); 5021 } 5022 return Vec; 5023 } 5024 5025 return SDValue(); 5026 } 5027 5028 // Gather data to see if the operation can be modelled as a 5029 // shuffle in combination with VEXTs. 
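// For example, a v4i16 whose lanes are extracted from one or two wider
// v8i16 sources can be re-expressed as EXTRACT_SUBVECTOR/VEXT ops on those
// sources feeding a single vector_shuffle with an in-range mask.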
5030 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
5031 SelectionDAG &DAG) const {
5032 SDLoc dl(Op);
5033 EVT VT = Op.getValueType();
5034 unsigned NumElts = VT.getVectorNumElements();
5035
5036 SmallVector<SDValue, 2> SourceVecs;
5037 SmallVector<unsigned, 2> MinElts;
5038 SmallVector<unsigned, 2> MaxElts;
5039
5040 for (unsigned i = 0; i < NumElts; ++i) {
5041 SDValue V = Op.getOperand(i);
5042 if (V.getOpcode() == ISD::UNDEF)
5043 continue;
5044 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
5045 // A shuffle can only come from building a vector from various
5046 // elements of other vectors.
5047 return SDValue();
5048 } else if (V.getOperand(0).getValueType().getVectorElementType() !=
5049 VT.getVectorElementType()) {
5050 // This code doesn't know how to handle shuffles where the vector
5051 // element types do not match (this happens because type legalization
5052 // promotes the return type of EXTRACT_VECTOR_ELT).
5053 // FIXME: It might be appropriate to extend this code to handle
5054 // mismatched types.
5055 return SDValue();
5056 }
5057
5058 // Record this extraction against the appropriate vector if possible...
5059 SDValue SourceVec = V.getOperand(0);
5060 // If the element number isn't a constant, we can't effectively
5061 // analyze what's going on.
5062 if (!isa<ConstantSDNode>(V.getOperand(1)))
5063 return SDValue();
5064 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
5065 bool FoundSource = false;
5066 for (unsigned j = 0; j < SourceVecs.size(); ++j) {
5067 if (SourceVecs[j] == SourceVec) {
5068 if (MinElts[j] > EltNo)
5069 MinElts[j] = EltNo;
5070 if (MaxElts[j] < EltNo)
5071 MaxElts[j] = EltNo;
5072 FoundSource = true;
5073 break;
5074 }
5075 }
5076
5077 // Or record a new source if not...
5078 if (!FoundSource) {
5079 SourceVecs.push_back(SourceVec);
5080 MinElts.push_back(EltNo);
5081 MaxElts.push_back(EltNo);
5082 }
5083 }
5084
5085 // Currently only do something sane when at most two source vectors are
5086 // involved.
5087 if (SourceVecs.size() > 2)
5088 return SDValue();
5089
5090 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
5091 int VEXTOffsets[2] = {0, 0};
5092
5093 // This loop extracts the usage patterns of the source vectors
5094 // and prepares appropriate SDValues for a shuffle if possible.
5095 for (unsigned i = 0; i < SourceVecs.size(); ++i) {
5096 if (SourceVecs[i].getValueType() == VT) {
5097 // No VEXT necessary
5098 ShuffleSrcs[i] = SourceVecs[i];
5099 VEXTOffsets[i] = 0;
5100 continue;
5101 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
5102 // It probably isn't worth padding out a smaller vector just to
5103 // break it down again in a shuffle.
5104 return SDValue();
5105 }
5106
5107 // Since only 64-bit and 128-bit vectors are legal on ARM and
5108 // we've eliminated the other cases...
5109 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 5110 "unexpected vector sizes in ReconstructShuffle"); 5111 5112 if (MaxElts[i] - MinElts[i] >= NumElts) { 5113 // Span too large for a VEXT to cope 5114 return SDValue(); 5115 } 5116 5117 if (MinElts[i] >= NumElts) { 5118 // The extraction can just take the second half 5119 VEXTOffsets[i] = NumElts; 5120 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 5121 SourceVecs[i], 5122 DAG.getIntPtrConstant(NumElts)); 5123 } else if (MaxElts[i] < NumElts) { 5124 // The extraction can just take the first half 5125 VEXTOffsets[i] = 0; 5126 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 5127 SourceVecs[i], 5128 DAG.getIntPtrConstant(0)); 5129 } else { 5130 // An actual VEXT is needed 5131 VEXTOffsets[i] = MinElts[i]; 5132 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 5133 SourceVecs[i], 5134 DAG.getIntPtrConstant(0)); 5135 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 5136 SourceVecs[i], 5137 DAG.getIntPtrConstant(NumElts)); 5138 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 5139 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 5140 } 5141 } 5142 5143 SmallVector<int, 8> Mask; 5144 5145 for (unsigned i = 0; i < NumElts; ++i) { 5146 SDValue Entry = Op.getOperand(i); 5147 if (Entry.getOpcode() == ISD::UNDEF) { 5148 Mask.push_back(-1); 5149 continue; 5150 } 5151 5152 SDValue ExtractVec = Entry.getOperand(0); 5153 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 5154 .getOperand(1))->getSExtValue(); 5155 if (ExtractVec == SourceVecs[0]) { 5156 Mask.push_back(ExtractElt - VEXTOffsets[0]); 5157 } else { 5158 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 5159 } 5160 } 5161 5162 // Final check before we try to produce nonsense... 5163 if (isShuffleMaskLegal(Mask, VT)) 5164 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 5165 &Mask[0]); 5166 5167 return SDValue(); 5168 } 5169 5170 /// isShuffleMaskLegal - Targets can use this to indicate that they only 5171 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 5172 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 5173 /// are assumed to be legal. 5174 bool 5175 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 5176 EVT VT) const { 5177 if (VT.getVectorNumElements() == 4 && 5178 (VT.is128BitVector() || VT.is64BitVector())) { 5179 unsigned PFIndexes[4]; 5180 for (unsigned i = 0; i != 4; ++i) { 5181 if (M[i] < 0) 5182 PFIndexes[i] = 8; 5183 else 5184 PFIndexes[i] = M[i]; 5185 } 5186 5187 // Compute the index in the perfect shuffle table. 
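// Each of the four mask entries is a base-9 digit (0-7 for a concrete lane,
// 8 for undef), so the index below is
// ((PF[0]*9 + PF[1])*9 + PF[2])*9 + PF[3].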
5188 unsigned PFTableIndex = 5189 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5190 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5191 unsigned Cost = (PFEntry >> 30); 5192 5193 if (Cost <= 4) 5194 return true; 5195 } 5196 5197 bool ReverseVEXT; 5198 unsigned Imm, WhichResult; 5199 5200 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5201 return (EltSize >= 32 || 5202 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 5203 isVREVMask(M, VT, 64) || 5204 isVREVMask(M, VT, 32) || 5205 isVREVMask(M, VT, 16) || 5206 isVEXTMask(M, VT, ReverseVEXT, Imm) || 5207 isVTBLMask(M, VT) || 5208 isVTRNMask(M, VT, WhichResult) || 5209 isVUZPMask(M, VT, WhichResult) || 5210 isVZIPMask(M, VT, WhichResult) || 5211 isVTRN_v_undef_Mask(M, VT, WhichResult) || 5212 isVUZP_v_undef_Mask(M, VT, WhichResult) || 5213 isVZIP_v_undef_Mask(M, VT, WhichResult) || 5214 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); 5215 } 5216 5217 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 5218 /// the specified operations to build the shuffle. 5219 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 5220 SDValue RHS, SelectionDAG &DAG, 5221 SDLoc dl) { 5222 unsigned OpNum = (PFEntry >> 26) & 0x0F; 5223 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 5224 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 5225 5226 enum { 5227 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 5228 OP_VREV, 5229 OP_VDUP0, 5230 OP_VDUP1, 5231 OP_VDUP2, 5232 OP_VDUP3, 5233 OP_VEXT1, 5234 OP_VEXT2, 5235 OP_VEXT3, 5236 OP_VUZPL, // VUZP, left result 5237 OP_VUZPR, // VUZP, right result 5238 OP_VZIPL, // VZIP, left result 5239 OP_VZIPR, // VZIP, right result 5240 OP_VTRNL, // VTRN, left result 5241 OP_VTRNR // VTRN, right result 5242 }; 5243 5244 if (OpNum == OP_COPY) { 5245 if (LHSID == (1*9+2)*9+3) return LHS; 5246 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 5247 return RHS; 5248 } 5249 5250 SDValue OpLHS, OpRHS; 5251 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 5252 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 5253 EVT VT = OpLHS.getValueType(); 5254 5255 switch (OpNum) { 5256 default: llvm_unreachable("Unknown shuffle opcode!"); 5257 case OP_VREV: 5258 // VREV divides the vector in half and swaps within the half. 
5259 if (VT.getVectorElementType() == MVT::i32 ||
5260 VT.getVectorElementType() == MVT::f32)
5261 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
5262 // vrev <4 x i16> -> VREV32
5263 if (VT.getVectorElementType() == MVT::i16)
5264 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
5265 // vrev <4 x i8> -> VREV16
5266 assert(VT.getVectorElementType() == MVT::i8);
5267 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
5268 case OP_VDUP0:
5269 case OP_VDUP1:
5270 case OP_VDUP2:
5271 case OP_VDUP3:
5272 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
5273 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
5274 case OP_VEXT1:
5275 case OP_VEXT2:
5276 case OP_VEXT3:
5277 return DAG.getNode(ARMISD::VEXT, dl, VT,
5278 OpLHS, OpRHS,
5279 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
5280 case OP_VUZPL:
5281 case OP_VUZPR:
5282 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
5283 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
5284 case OP_VZIPL:
5285 case OP_VZIPR:
5286 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
5287 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
5288 case OP_VTRNL:
5289 case OP_VTRNR:
5290 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
5291 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
5292 }
5293 }
5294
5295 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
5296 ArrayRef<int> ShuffleMask,
5297 SelectionDAG &DAG) {
5298 // Check to see if we can use the VTBL instruction.
5299 SDValue V1 = Op.getOperand(0);
5300 SDValue V2 = Op.getOperand(1);
5301 SDLoc DL(Op);
5302
5303 SmallVector<SDValue, 8> VTBLMask;
5304 for (ArrayRef<int>::iterator
5305 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
5306 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));
5307
5308 if (V2.getNode()->getOpcode() == ISD::UNDEF)
5309 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
5310 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
5311 &VTBLMask[0], 8));
5312
5313 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
5314 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8,
5315 &VTBLMask[0], 8));
5316 }
5317
5318 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
5319 SelectionDAG &DAG) {
5320 SDLoc DL(Op);
5321 SDValue OpLHS = Op.getOperand(0);
5322 EVT VT = OpLHS.getValueType();
5323
5324 assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
5325 "Expect a v8i16/v16i8 type");
5326 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
5327 // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now,
5328 // extract the first 8 bytes into the top double word and the last 8 bytes
5329 // into the bottom double word. The v8i16 case is similar.
5330 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
5331 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
5332 DAG.getConstant(ExtractNum, MVT::i32));
5333 }
5334
5335 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
5336 SDValue V1 = Op.getOperand(0);
5337 SDValue V2 = Op.getOperand(1);
5338 SDLoc dl(Op);
5339 EVT VT = Op.getValueType();
5340 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
5341
5342 // Convert shuffles that are directly supported on NEON to target-specific
5343 // DAG nodes, instead of keeping them as shuffles and matching them again
5344 // during code selection. This is more efficient and avoids the possibility
5345 // of inconsistencies between legalization and selection.
5346 // FIXME: floating-point vectors should be canonicalized to integer vectors
5347 // of the same size so that they get CSEd properly.
5348 ArrayRef<int> ShuffleMask = SVN->getMask(); 5349 5350 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5351 if (EltSize <= 32) { 5352 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 5353 int Lane = SVN->getSplatIndex(); 5354 // If this is undef splat, generate it via "just" vdup, if possible. 5355 if (Lane == -1) Lane = 0; 5356 5357 // Test if V1 is a SCALAR_TO_VECTOR. 5358 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5359 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 5360 } 5361 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 5362 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 5363 // reaches it). 5364 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 5365 !isa<ConstantSDNode>(V1.getOperand(0))) { 5366 bool IsScalarToVector = true; 5367 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 5368 if (V1.getOperand(i).getOpcode() != ISD::UNDEF) { 5369 IsScalarToVector = false; 5370 break; 5371 } 5372 if (IsScalarToVector) 5373 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 5374 } 5375 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 5376 DAG.getConstant(Lane, MVT::i32)); 5377 } 5378 5379 bool ReverseVEXT; 5380 unsigned Imm; 5381 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 5382 if (ReverseVEXT) 5383 std::swap(V1, V2); 5384 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 5385 DAG.getConstant(Imm, MVT::i32)); 5386 } 5387 5388 if (isVREVMask(ShuffleMask, VT, 64)) 5389 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 5390 if (isVREVMask(ShuffleMask, VT, 32)) 5391 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 5392 if (isVREVMask(ShuffleMask, VT, 16)) 5393 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 5394 5395 if (V2->getOpcode() == ISD::UNDEF && 5396 isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 5397 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 5398 DAG.getConstant(Imm, MVT::i32)); 5399 } 5400 5401 // Check for Neon shuffles that modify both input vectors in place. 5402 // If both results are used, i.e., if there are two shuffles with the same 5403 // source operands and with masks corresponding to both results of one of 5404 // these operations, DAG memoization will ensure that a single node is 5405 // used for both shuffles. 5406 unsigned WhichResult; 5407 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 5408 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 5409 V1, V2).getValue(WhichResult); 5410 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 5411 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 5412 V1, V2).getValue(WhichResult); 5413 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 5414 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 5415 V1, V2).getValue(WhichResult); 5416 5417 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5418 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 5419 V1, V1).getValue(WhichResult); 5420 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5421 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 5422 V1, V1).getValue(WhichResult); 5423 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5424 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 5425 V1, V1).getValue(WhichResult); 5426 } 5427 5428 // If the shuffle is not directly supported and it has 4 elements, use 5429 // the PerfectShuffle-generated table to synthesize it from other shuffles. 
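// The top two bits of a table entry hold the cost of its stored expansion;
// the entry is only used when that cost passes the check below.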
5430 unsigned NumElts = VT.getVectorNumElements(); 5431 if (NumElts == 4) { 5432 unsigned PFIndexes[4]; 5433 for (unsigned i = 0; i != 4; ++i) { 5434 if (ShuffleMask[i] < 0) 5435 PFIndexes[i] = 8; 5436 else 5437 PFIndexes[i] = ShuffleMask[i]; 5438 } 5439 5440 // Compute the index in the perfect shuffle table. 5441 unsigned PFTableIndex = 5442 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5443 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5444 unsigned Cost = (PFEntry >> 30); 5445 5446 if (Cost <= 4) 5447 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5448 } 5449 5450 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 5451 if (EltSize >= 32) { 5452 // Do the expansion with floating-point types, since that is what the VFP 5453 // registers are defined to use, and since i64 is not legal. 5454 EVT EltVT = EVT::getFloatingPointVT(EltSize); 5455 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 5456 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 5457 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 5458 SmallVector<SDValue, 8> Ops; 5459 for (unsigned i = 0; i < NumElts; ++i) { 5460 if (ShuffleMask[i] < 0) 5461 Ops.push_back(DAG.getUNDEF(EltVT)); 5462 else 5463 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 5464 ShuffleMask[i] < (int)NumElts ? V1 : V2, 5465 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 5466 MVT::i32))); 5467 } 5468 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 5469 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 5470 } 5471 5472 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) 5473 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); 5474 5475 if (VT == MVT::v8i8) { 5476 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 5477 if (NewOp.getNode()) 5478 return NewOp; 5479 } 5480 5481 return SDValue(); 5482 } 5483 5484 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 5485 // INSERT_VECTOR_ELT is legal only for immediate indexes. 5486 SDValue Lane = Op.getOperand(2); 5487 if (!isa<ConstantSDNode>(Lane)) 5488 return SDValue(); 5489 5490 return Op; 5491 } 5492 5493 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 5494 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 5495 SDValue Lane = Op.getOperand(1); 5496 if (!isa<ConstantSDNode>(Lane)) 5497 return SDValue(); 5498 5499 SDValue Vec = Op.getOperand(0); 5500 if (Op.getValueType() == MVT::i32 && 5501 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 5502 SDLoc dl(Op); 5503 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 5504 } 5505 5506 return Op; 5507 } 5508 5509 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5510 // The only time a CONCAT_VECTORS operation can have legal types is when 5511 // two 64-bit vectors are concatenated to a 128-bit vector. 
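// For example, v4i32 = concat(v2i32 A, v2i32 B) becomes: bitcast A and B
// to f64, insert them as the two lanes of a v2f64, and bitcast the result
// back to v4i32.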
5512 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 5513 "unexpected CONCAT_VECTORS"); 5514 SDLoc dl(Op); 5515 SDValue Val = DAG.getUNDEF(MVT::v2f64); 5516 SDValue Op0 = Op.getOperand(0); 5517 SDValue Op1 = Op.getOperand(1); 5518 if (Op0.getOpcode() != ISD::UNDEF) 5519 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 5520 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 5521 DAG.getIntPtrConstant(0)); 5522 if (Op1.getOpcode() != ISD::UNDEF) 5523 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 5524 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 5525 DAG.getIntPtrConstant(1)); 5526 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 5527 } 5528 5529 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 5530 /// element has been zero/sign-extended, depending on the isSigned parameter, 5531 /// from an integer type half its size. 5532 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 5533 bool isSigned) { 5534 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 5535 EVT VT = N->getValueType(0); 5536 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 5537 SDNode *BVN = N->getOperand(0).getNode(); 5538 if (BVN->getValueType(0) != MVT::v4i32 || 5539 BVN->getOpcode() != ISD::BUILD_VECTOR) 5540 return false; 5541 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 5542 unsigned HiElt = 1 - LoElt; 5543 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 5544 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 5545 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 5546 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 5547 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 5548 return false; 5549 if (isSigned) { 5550 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 5551 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 5552 return true; 5553 } else { 5554 if (Hi0->isNullValue() && Hi1->isNullValue()) 5555 return true; 5556 } 5557 return false; 5558 } 5559 5560 if (N->getOpcode() != ISD::BUILD_VECTOR) 5561 return false; 5562 5563 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 5564 SDNode *Elt = N->getOperand(i).getNode(); 5565 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 5566 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5567 unsigned HalfSize = EltSize / 2; 5568 if (isSigned) { 5569 if (!isIntN(HalfSize, C->getSExtValue())) 5570 return false; 5571 } else { 5572 if (!isUIntN(HalfSize, C->getZExtValue())) 5573 return false; 5574 } 5575 continue; 5576 } 5577 return false; 5578 } 5579 5580 return true; 5581 } 5582 5583 /// isSignExtended - Check if a node is a vector value that is sign-extended 5584 /// or a constant BUILD_VECTOR with sign-extended elements. 5585 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 5586 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 5587 return true; 5588 if (isExtendedBUILD_VECTOR(N, DAG, true)) 5589 return true; 5590 return false; 5591 } 5592 5593 /// isZeroExtended - Check if a node is a vector value that is zero-extended 5594 /// or a constant BUILD_VECTOR with zero-extended elements. 
5595 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
5596 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
5597 return true;
5598 if (isExtendedBUILD_VECTOR(N, DAG, false))
5599 return true;
5600 return false;
5601 }
5602
5603 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
5604 if (OrigVT.getSizeInBits() >= 64)
5605 return OrigVT;
5606
5607 assert(OrigVT.isSimple() && "Expecting a simple value type");
5608
5609 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
5610 switch (OrigSimpleTy) {
5611 default: llvm_unreachable("Unexpected Vector Type");
5612 case MVT::v2i8:
5613 case MVT::v2i16:
5614 return MVT::v2i32;
5615 case MVT::v4i8:
5616 return MVT::v4i16;
5617 }
5618 }
5619
5620 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
5621 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
5622 /// We insert the required extension here to get the vector to fill a D register.
5623 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
5624 const EVT &OrigTy,
5625 const EVT &ExtTy,
5626 unsigned ExtOpcode) {
5627 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
5628 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
5629 // 64-bits we need to insert a new extension so that it will be 64-bits.
5630 assert(ExtTy.is128BitVector() && "Unexpected extension size");
5631 if (OrigTy.getSizeInBits() >= 64)
5632 return N;
5633
5634 // Must extend size to at least 64 bits to be used as an operand for VMULL.
5635 EVT NewVT = getExtensionTo64Bits(OrigTy);
5636
5637 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
5638 }
5639
5640 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
5641 /// does not do any sign/zero extension. If the original vector is less
5642 /// than 64 bits, an appropriate extension will be added after the load to
5643 /// reach a total size of 64 bits. We have to add the extension separately
5644 /// because ARM does not have a sign/zero extending load for vectors.
5645 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
5646 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
5647
5648 // The load already has the right type.
5649 if (ExtendedTy == LD->getMemoryVT())
5650 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
5651 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
5652 LD->isNonTemporal(), LD->isInvariant(),
5653 LD->getAlignment());
5654
5655 // We need to create a zextload/sextload. We cannot just create a load
5656 // followed by a zext/sext node because LowerMUL is also run during normal
5657 // operation legalization where we can't create illegal types.
5658 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
5659 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
5660 LD->getMemoryVT(), LD->isVolatile(),
5661 LD->isNonTemporal(), LD->getAlignment());
5662 }
5663
5664 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
5665 /// extending load, or BUILD_VECTOR with extended elements, return the
5666 /// unextended value. The unextended vector should be 64 bits so that it can
5667 /// be used as an operand to a VMULL instruction. If the original vector size
5668 /// before extension is less than 64 bits we add an extension to resize
5669 /// the vector to 64 bits.
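///
/// For example, (v8i16 (sext (v8i8 X))) yields X itself, an extending load
/// is re-emitted as an explicit extload of the unextended type, and a
/// constant BUILD_VECTOR is rebuilt with its elements truncated to half
/// their size.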
5670 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { 5671 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 5672 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, 5673 N->getOperand(0)->getValueType(0), 5674 N->getValueType(0), 5675 N->getOpcode()); 5676 5677 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 5678 return SkipLoadExtensionForVMULL(LD, DAG); 5679 5680 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 5681 // have been legalized as a BITCAST from v4i32. 5682 if (N->getOpcode() == ISD::BITCAST) { 5683 SDNode *BVN = N->getOperand(0).getNode(); 5684 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 5685 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 5686 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 5687 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::v2i32, 5688 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 5689 } 5690 // Construct a new BUILD_VECTOR with elements truncated to half the size. 5691 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 5692 EVT VT = N->getValueType(0); 5693 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 5694 unsigned NumElts = VT.getVectorNumElements(); 5695 MVT TruncVT = MVT::getIntegerVT(EltSize); 5696 SmallVector<SDValue, 8> Ops; 5697 for (unsigned i = 0; i != NumElts; ++i) { 5698 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 5699 const APInt &CInt = C->getAPIntValue(); 5700 // Element types smaller than 32 bits are not legal, so use i32 elements. 5701 // The values are implicitly truncated so sext vs. zext doesn't matter. 5702 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32)); 5703 } 5704 return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), 5705 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 5706 } 5707 5708 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 5709 unsigned Opcode = N->getOpcode(); 5710 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 5711 SDNode *N0 = N->getOperand(0).getNode(); 5712 SDNode *N1 = N->getOperand(1).getNode(); 5713 return N0->hasOneUse() && N1->hasOneUse() && 5714 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 5715 } 5716 return false; 5717 } 5718 5719 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 5720 unsigned Opcode = N->getOpcode(); 5721 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 5722 SDNode *N0 = N->getOperand(0).getNode(); 5723 SDNode *N1 = N->getOperand(1).getNode(); 5724 return N0->hasOneUse() && N1->hasOneUse() && 5725 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 5726 } 5727 return false; 5728 } 5729 5730 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 5731 // Multiplications are only custom-lowered for 128-bit vectors so that 5732 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
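// For example, (v8i16 (mul (sext v8i8 A), (sext v8i8 B))) becomes a single
// VMULLs of the narrow A and B rather than two vmovl's feeding a vmul.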
5733 EVT VT = Op.getValueType(); 5734 assert(VT.is128BitVector() && VT.isInteger() && 5735 "unexpected type for custom-lowering ISD::MUL"); 5736 SDNode *N0 = Op.getOperand(0).getNode(); 5737 SDNode *N1 = Op.getOperand(1).getNode(); 5738 unsigned NewOpc = 0; 5739 bool isMLA = false; 5740 bool isN0SExt = isSignExtended(N0, DAG); 5741 bool isN1SExt = isSignExtended(N1, DAG); 5742 if (isN0SExt && isN1SExt) 5743 NewOpc = ARMISD::VMULLs; 5744 else { 5745 bool isN0ZExt = isZeroExtended(N0, DAG); 5746 bool isN1ZExt = isZeroExtended(N1, DAG); 5747 if (isN0ZExt && isN1ZExt) 5748 NewOpc = ARMISD::VMULLu; 5749 else if (isN1SExt || isN1ZExt) { 5750 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 5751 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 5752 if (isN1SExt && isAddSubSExt(N0, DAG)) { 5753 NewOpc = ARMISD::VMULLs; 5754 isMLA = true; 5755 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 5756 NewOpc = ARMISD::VMULLu; 5757 isMLA = true; 5758 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 5759 std::swap(N0, N1); 5760 NewOpc = ARMISD::VMULLu; 5761 isMLA = true; 5762 } 5763 } 5764 5765 if (!NewOpc) { 5766 if (VT == MVT::v2i64) 5767 // Fall through to expand this. It is not legal. 5768 return SDValue(); 5769 else 5770 // Other vector multiplications are legal. 5771 return Op; 5772 } 5773 } 5774 5775 // Legalize to a VMULL instruction. 5776 SDLoc DL(Op); 5777 SDValue Op0; 5778 SDValue Op1 = SkipExtensionForVMULL(N1, DAG); 5779 if (!isMLA) { 5780 Op0 = SkipExtensionForVMULL(N0, DAG); 5781 assert(Op0.getValueType().is64BitVector() && 5782 Op1.getValueType().is64BitVector() && 5783 "unexpected types for extended operands to VMULL"); 5784 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 5785 } 5786 5787 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 5788 // isel lowering to take advantage of no-stall back to back vmul + vmla. 5789 // vmull q0, d4, d6 5790 // vmlal q0, d5, d6 5791 // is faster than 5792 // vaddl q0, d4, d5 5793 // vmovl q1, d6 5794 // vmul q0, q0, q1 5795 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); 5796 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); 5797 EVT Op1VT = Op1.getValueType(); 5798 return DAG.getNode(N0->getOpcode(), DL, VT, 5799 DAG.getNode(NewOpc, DL, VT, 5800 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 5801 DAG.getNode(NewOpc, DL, VT, 5802 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 5803 } 5804 5805 static SDValue 5806 LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) { 5807 // Convert to float 5808 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 5809 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 5810 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 5811 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 5812 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 5813 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 5814 // Get reciprocal estimate. 5815 // float4 recip = vrecpeq_f32(yf); 5816 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5817 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 5818 // Because char has a smaller range than uchar, we can actually get away 5819 // without any newton steps. This requires that we use a weird bias 5820 // of 0xb000, however (again, this has been exhaustively tested). 
5821 // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
5822 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
5823 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
5824 Y = DAG.getConstant(0xb000, MVT::i32);
5825 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
5826 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
5827 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
5828 // Convert back to short.
5829 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
5830 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
5831 return X;
5832 }
5833
5834 static SDValue
5835 LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG) {
5836 SDValue N2;
5837 // Convert to float.
5838 // float4 yf = vcvt_f32_s32(vmovl_s16(y));
5839 // float4 xf = vcvt_f32_s32(vmovl_s16(x));
5840 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
5841 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
5842 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
5843 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
5844
5845 // Use reciprocal estimate and one refinement step.
5846 // float4 recip = vrecpeq_f32(yf);
5847 // recip *= vrecpsq_f32(yf, recip);
5848 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5849 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
5850 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5851 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
5852 N1, N2);
5853 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
5854 // Because short has a smaller range than ushort, we can actually get away
5855 // with only a single newton step. This requires that we use a weird bias
5856 // of 0x89, however (again, this has been exhaustively tested).
5857 // float4 result = as_float4(as_int4(xf*recip) + 0x89);
5858 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
5859 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
5860 N1 = DAG.getConstant(0x89, MVT::i32);
5861 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
5862 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
5863 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
5864 // Convert back to integer and return.
5865 // return vmovn_s32(vcvt_s32_f32(result));
5866 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
5867 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
5868 return N0;
5869 }
5870
5871 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
5872 EVT VT = Op.getValueType();
5873 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
5874 "unexpected type for custom-lowering ISD::SDIV");
5875
5876 SDLoc dl(Op);
5877 SDValue N0 = Op.getOperand(0);
5878 SDValue N1 = Op.getOperand(1);
5879 SDValue N2, N3;
5880
5881 if (VT == MVT::v8i8) {
5882 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
5883 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
5884
5885 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
5886 DAG.getIntPtrConstant(4));
5887 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
5888 DAG.getIntPtrConstant(4));
5889 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
5890 DAG.getIntPtrConstant(0));
5891 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
5892 DAG.getIntPtrConstant(0));
5893
5894 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
5895 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
5896
5897 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
5898 N0 = LowerCONCAT_VECTORS(N0, DAG);
5899
5900 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
5901 return N0;
5902 }
5903 return LowerSDIV_v4i16(N0, N1, dl, DAG);
5904 }
5905
5906 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
5907 EVT VT = Op.getValueType();
5908 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
5909 "unexpected type for custom-lowering ISD::UDIV");
5910
5911 SDLoc dl(Op);
5912 SDValue N0 = Op.getOperand(0);
5913 SDValue N1 = Op.getOperand(1);
5914 SDValue N2, N3;
5915
5916 if (VT == MVT::v8i8) {
5917 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
5918 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
5919
5920 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
5921 DAG.getIntPtrConstant(4));
5922 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
5923 DAG.getIntPtrConstant(4));
5924 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
5925 DAG.getIntPtrConstant(0));
5926 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
5927 DAG.getIntPtrConstant(0));
5928
5929 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
5930 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
5931
5932 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
5933 N0 = LowerCONCAT_VECTORS(N0, DAG);
5934
5935 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
5936 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
5937 N0);
5938 return N0;
5939 }
5940
5941 // v4i16 udiv ... Convert to float.
5942 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
5943 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
5944 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
5945 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
5946 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
5947 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
5948
5949 // Use reciprocal estimate and two refinement steps.
5950 // float4 recip = vrecpeq_f32(yf);
5951 // recip *= vrecpsq_f32(yf, recip);
5952 // recip *= vrecpsq_f32(yf, recip);
5953 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5954 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
5955 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5956 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
5957 BN1, N2);
5958 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
5959 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
5960 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
5961 BN1, N2);
5962 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
5963 // Simply multiplying by the reciprocal estimate can leave us a few ulps
5964 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
5965 // and that it will never cause us to return an answer too large).
5966 // float4 result = as_float4(as_int4(xf*recip) + 2);
5967 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
5968 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
5969 N1 = DAG.getConstant(2, MVT::i32);
5970 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
5971 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
5972 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
5973 // Convert back to integer and return.
5974 // return vmovn_u32(vcvt_s32_f32(result));
5975 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
5976 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
5977 return N0;
5978 }
5979
5980 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
5981 EVT VT = Op.getNode()->getValueType(0);
5982 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
5983
5984 unsigned Opc;
5985 bool ExtraOp = false;
5986 switch (Op.getOpcode()) {
5987 default: llvm_unreachable("Invalid code");
5988 case ISD::ADDC: Opc = ARMISD::ADDC; break;
5989 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
5990 case ISD::SUBC: Opc = ARMISD::SUBC; break;
5991 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
5992 }
5993
5994 if (!ExtraOp)
5995 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
5996 Op.getOperand(1));
5997 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
5998 Op.getOperand(1), Op.getOperand(2));
5999 }
6000
6001 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
6002 // Monotonic load/store is legal for all targets
6003 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
6004 return Op;
6005
6006 // Acquire/Release load/store is not legal for targets without a
6007 // dmb or equivalent available.
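// Returning an empty SDValue signals that we did not lower the node, so
// the legalizer falls back to its default expansion of these atomic
// operations rather than emitting a plain load or store.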
6008 return SDValue(); 6009 } 6010 6011 static void 6012 ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results, 6013 SelectionDAG &DAG) { 6014 SDLoc dl(Node); 6015 assert (Node->getValueType(0) == MVT::i64 && 6016 "Only know how to expand i64 atomics"); 6017 AtomicSDNode *AN = cast<AtomicSDNode>(Node); 6018 6019 SmallVector<SDValue, 6> Ops; 6020 Ops.push_back(Node->getOperand(0)); // Chain 6021 Ops.push_back(Node->getOperand(1)); // Ptr 6022 for(unsigned i=2; i<Node->getNumOperands(); i++) { 6023 // Low part 6024 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6025 Node->getOperand(i), DAG.getIntPtrConstant(0))); 6026 // High part 6027 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 6028 Node->getOperand(i), DAG.getIntPtrConstant(1))); 6029 } 6030 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 6031 SDValue Result = 6032 DAG.getAtomic(Node->getOpcode(), dl, MVT::i64, Tys, Ops.data(), Ops.size(), 6033 cast<MemSDNode>(Node)->getMemOperand(), AN->getOrdering(), 6034 AN->getSynchScope()); 6035 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) }; 6036 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 6037 Results.push_back(Result.getValue(2)); 6038 } 6039 6040 static void ReplaceREADCYCLECOUNTER(SDNode *N, 6041 SmallVectorImpl<SDValue> &Results, 6042 SelectionDAG &DAG, 6043 const ARMSubtarget *Subtarget) { 6044 SDLoc DL(N); 6045 SDValue Cycles32, OutChain; 6046 6047 if (Subtarget->hasPerfMon()) { 6048 // Under Power Management extensions, the cycle-count is: 6049 // mrc p15, #0, <Rt>, c9, c13, #0 6050 SDValue Ops[] = { N->getOperand(0), // Chain 6051 DAG.getConstant(Intrinsic::arm_mrc, MVT::i32), 6052 DAG.getConstant(15, MVT::i32), 6053 DAG.getConstant(0, MVT::i32), 6054 DAG.getConstant(9, MVT::i32), 6055 DAG.getConstant(13, MVT::i32), 6056 DAG.getConstant(0, MVT::i32) 6057 }; 6058 6059 Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 6060 DAG.getVTList(MVT::i32, MVT::Other), &Ops[0], 6061 array_lengthof(Ops)); 6062 OutChain = Cycles32.getValue(1); 6063 } else { 6064 // Intrinsic is defined to return 0 on unsupported platforms. Technically 6065 // there are older ARM CPUs that have implementation-specific ways of 6066 // obtaining this information (FIXME!). 6067 Cycles32 = DAG.getConstant(0, MVT::i32); 6068 OutChain = DAG.getEntryNode(); 6069 } 6070 6071 6072 SDValue Cycles64 = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, 6073 Cycles32, DAG.getConstant(0, MVT::i32)); 6074 Results.push_back(Cycles64); 6075 Results.push_back(OutChain); 6076 } 6077 6078 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 6079 switch (Op.getOpcode()) { 6080 default: llvm_unreachable("Don't know how to custom lower this!"); 6081 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 6082 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 6083 case ISD::GlobalAddress: 6084 return Subtarget->isTargetDarwin() ? 
LowerGlobalAddressDarwin(Op, DAG) : 6085 LowerGlobalAddressELF(Op, DAG); 6086 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 6087 case ISD::SELECT: return LowerSELECT(Op, DAG); 6088 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 6089 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 6090 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 6091 case ISD::VASTART: return LowerVASTART(Op, DAG); 6092 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 6093 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 6094 case ISD::SINT_TO_FP: 6095 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 6096 case ISD::FP_TO_SINT: 6097 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 6098 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 6099 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 6100 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 6101 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 6102 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 6103 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 6104 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 6105 Subtarget); 6106 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 6107 case ISD::SHL: 6108 case ISD::SRL: 6109 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 6110 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 6111 case ISD::SRL_PARTS: 6112 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 6113 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 6114 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 6115 case ISD::SETCC: return LowerVSETCC(Op, DAG); 6116 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 6117 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 6118 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 6119 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 6120 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 6121 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 6122 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 6123 case ISD::MUL: return LowerMUL(Op, DAG); 6124 case ISD::SDIV: return LowerSDIV(Op, DAG); 6125 case ISD::UDIV: return LowerUDIV(Op, DAG); 6126 case ISD::ADDC: 6127 case ISD::ADDE: 6128 case ISD::SUBC: 6129 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 6130 case ISD::ATOMIC_LOAD: 6131 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 6132 case ISD::SDIVREM: 6133 case ISD::UDIVREM: return LowerDivRem(Op, DAG); 6134 } 6135 } 6136 6137 /// ReplaceNodeResults - Replace the results of node with an illegal result 6138 /// type with new values built out of custom code. 
6139 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 6140 SmallVectorImpl<SDValue>&Results, 6141 SelectionDAG &DAG) const { 6142 SDValue Res; 6143 switch (N->getOpcode()) { 6144 default: 6145 llvm_unreachable("Don't know how to custom expand this!"); 6146 case ISD::BITCAST: 6147 Res = ExpandBITCAST(N, DAG); 6148 break; 6149 case ISD::SIGN_EXTEND: 6150 case ISD::ZERO_EXTEND: 6151 Res = ExpandVectorExtension(N, DAG); 6152 break; 6153 case ISD::SRL: 6154 case ISD::SRA: 6155 Res = Expand64BitShift(N, DAG, Subtarget); 6156 break; 6157 case ISD::READCYCLECOUNTER: 6158 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 6159 return; 6160 case ISD::ATOMIC_STORE: 6161 case ISD::ATOMIC_LOAD: 6162 case ISD::ATOMIC_LOAD_ADD: 6163 case ISD::ATOMIC_LOAD_AND: 6164 case ISD::ATOMIC_LOAD_NAND: 6165 case ISD::ATOMIC_LOAD_OR: 6166 case ISD::ATOMIC_LOAD_SUB: 6167 case ISD::ATOMIC_LOAD_XOR: 6168 case ISD::ATOMIC_SWAP: 6169 case ISD::ATOMIC_CMP_SWAP: 6170 case ISD::ATOMIC_LOAD_MIN: 6171 case ISD::ATOMIC_LOAD_UMIN: 6172 case ISD::ATOMIC_LOAD_MAX: 6173 case ISD::ATOMIC_LOAD_UMAX: 6174 ReplaceATOMIC_OP_64(N, Results, DAG); 6175 return; 6176 } 6177 if (Res.getNode()) 6178 Results.push_back(Res); 6179 } 6180 6181 //===----------------------------------------------------------------------===// 6182 // ARM Scheduler Hooks 6183 //===----------------------------------------------------------------------===// 6184 6185 MachineBasicBlock * 6186 ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 6187 MachineBasicBlock *BB, 6188 unsigned Size) const { 6189 unsigned dest = MI->getOperand(0).getReg(); 6190 unsigned ptr = MI->getOperand(1).getReg(); 6191 unsigned oldval = MI->getOperand(2).getReg(); 6192 unsigned newval = MI->getOperand(3).getReg(); 6193 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6194 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm()); 6195 DebugLoc dl = MI->getDebugLoc(); 6196 bool isThumb2 = Subtarget->isThumb2(); 6197 6198 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6199 unsigned scratch = MRI.createVirtualRegister(isThumb2 ? 6200 (const TargetRegisterClass*)&ARM::rGPRRegClass : 6201 (const TargetRegisterClass*)&ARM::GPRRegClass); 6202 6203 if (isThumb2) { 6204 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 6205 MRI.constrainRegClass(oldval, &ARM::rGPRRegClass); 6206 MRI.constrainRegClass(newval, &ARM::rGPRRegClass); 6207 } 6208 6209 unsigned ldrOpc, strOpc; 6210 getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc); 6211 6212 MachineFunction *MF = BB->getParent(); 6213 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6214 MachineFunction::iterator It = BB; 6215 ++It; // insert the new blocks after the current block 6216 6217 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 6218 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 6219 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6220 MF->insert(It, loop1MBB); 6221 MF->insert(It, loop2MBB); 6222 MF->insert(It, exitMBB); 6223 6224 // Transfer the remainder of BB and its successor edges to exitMBB. 6225 exitMBB->splice(exitMBB->begin(), BB, 6226 llvm::next(MachineBasicBlock::iterator(MI)), 6227 BB->end()); 6228 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6229 6230 // thisMBB: 6231 // ... 
6232 // fallthrough --> loop1MBB 6233 BB->addSuccessor(loop1MBB); 6234 6235 // loop1MBB: 6236 // ldrex dest, [ptr] 6237 // cmp dest, oldval 6238 // bne exitMBB 6239 BB = loop1MBB; 6240 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 6241 if (ldrOpc == ARM::t2LDREX) 6242 MIB.addImm(0); 6243 AddDefaultPred(MIB); 6244 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6245 .addReg(dest).addReg(oldval)); 6246 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6247 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6248 BB->addSuccessor(loop2MBB); 6249 BB->addSuccessor(exitMBB); 6250 6251 // loop2MBB: 6252 // strex scratch, newval, [ptr] 6253 // cmp scratch, #0 6254 // bne loop1MBB 6255 BB = loop2MBB; 6256 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 6257 if (strOpc == ARM::t2STREX) 6258 MIB.addImm(0); 6259 AddDefaultPred(MIB); 6260 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6261 .addReg(scratch).addImm(0)); 6262 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6263 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6264 BB->addSuccessor(loop1MBB); 6265 BB->addSuccessor(exitMBB); 6266 6267 // exitMBB: 6268 // ... 6269 BB = exitMBB; 6270 6271 MI->eraseFromParent(); // The instruction is gone now. 6272 6273 return BB; 6274 } 6275 6276 MachineBasicBlock * 6277 ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 6278 unsigned Size, unsigned BinOpcode) const { 6279 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 6280 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6281 6282 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6283 MachineFunction *MF = BB->getParent(); 6284 MachineFunction::iterator It = BB; 6285 ++It; 6286 6287 unsigned dest = MI->getOperand(0).getReg(); 6288 unsigned ptr = MI->getOperand(1).getReg(); 6289 unsigned incr = MI->getOperand(2).getReg(); 6290 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); 6291 DebugLoc dl = MI->getDebugLoc(); 6292 bool isThumb2 = Subtarget->isThumb2(); 6293 6294 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6295 if (isThumb2) { 6296 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 6297 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 6298 MRI.constrainRegClass(incr, &ARM::rGPRRegClass); 6299 } 6300 6301 unsigned ldrOpc, strOpc; 6302 getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc); 6303 6304 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6305 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6306 MF->insert(It, loopMBB); 6307 MF->insert(It, exitMBB); 6308 6309 // Transfer the remainder of BB and its successor edges to exitMBB. 6310 exitMBB->splice(exitMBB->begin(), BB, 6311 llvm::next(MachineBasicBlock::iterator(MI)), 6312 BB->end()); 6313 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6314 6315 const TargetRegisterClass *TRC = isThumb2 ? 6316 (const TargetRegisterClass*)&ARM::rGPRRegClass : 6317 (const TargetRegisterClass*)&ARM::GPRRegClass; 6318 unsigned scratch = MRI.createVirtualRegister(TRC); 6319 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 6320 6321 // thisMBB: 6322 // ... 
6323 // fallthrough --> loopMBB 6324 BB->addSuccessor(loopMBB); 6325 6326 // loopMBB: 6327 // ldrex dest, ptr 6328 // <binop> scratch2, dest, incr 6329 // strex scratch, scratch2, ptr 6330 // cmp scratch, #0 6331 // bne- loopMBB 6332 // fallthrough --> exitMBB 6333 BB = loopMBB; 6334 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 6335 if (ldrOpc == ARM::t2LDREX) 6336 MIB.addImm(0); 6337 AddDefaultPred(MIB); 6338 if (BinOpcode) { 6339 // operand order needs to go the other way for NAND 6340 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 6341 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 6342 addReg(incr).addReg(dest)).addReg(0); 6343 else 6344 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 6345 addReg(dest).addReg(incr)).addReg(0); 6346 } 6347 6348 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 6349 if (strOpc == ARM::t2STREX) 6350 MIB.addImm(0); 6351 AddDefaultPred(MIB); 6352 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6353 .addReg(scratch).addImm(0)); 6354 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6355 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6356 6357 BB->addSuccessor(loopMBB); 6358 BB->addSuccessor(exitMBB); 6359 6360 // exitMBB: 6361 // ... 6362 BB = exitMBB; 6363 6364 MI->eraseFromParent(); // The instruction is gone now. 6365 6366 return BB; 6367 } 6368 6369 MachineBasicBlock * 6370 ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 6371 MachineBasicBlock *BB, 6372 unsigned Size, 6373 bool signExtend, 6374 ARMCC::CondCodes Cond) const { 6375 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6376 6377 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6378 MachineFunction *MF = BB->getParent(); 6379 MachineFunction::iterator It = BB; 6380 ++It; 6381 6382 unsigned dest = MI->getOperand(0).getReg(); 6383 unsigned ptr = MI->getOperand(1).getReg(); 6384 unsigned incr = MI->getOperand(2).getReg(); 6385 unsigned oldval = dest; 6386 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); 6387 DebugLoc dl = MI->getDebugLoc(); 6388 bool isThumb2 = Subtarget->isThumb2(); 6389 6390 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6391 if (isThumb2) { 6392 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 6393 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 6394 MRI.constrainRegClass(incr, &ARM::rGPRRegClass); 6395 } 6396 6397 unsigned ldrOpc, strOpc, extendOpc; 6398 getExclusiveOperation(Size, Ord, isThumb2, ldrOpc, strOpc); 6399 switch (Size) { 6400 default: llvm_unreachable("unsupported size for AtomicBinaryMinMax!"); 6401 case 1: 6402 extendOpc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 6403 break; 6404 case 2: 6405 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 6406 break; 6407 case 4: 6408 extendOpc = 0; 6409 break; 6410 } 6411 6412 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6413 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6414 MF->insert(It, loopMBB); 6415 MF->insert(It, exitMBB); 6416 6417 // Transfer the remainder of BB and its successor edges to exitMBB. 6418 exitMBB->splice(exitMBB->begin(), BB, 6419 llvm::next(MachineBasicBlock::iterator(MI)), 6420 BB->end()); 6421 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6422 6423 const TargetRegisterClass *TRC = isThumb2 ? 
6424 (const TargetRegisterClass*)&ARM::rGPRRegClass : 6425 (const TargetRegisterClass*)&ARM::GPRRegClass; 6426 unsigned scratch = MRI.createVirtualRegister(TRC); 6427 unsigned scratch2 = MRI.createVirtualRegister(TRC); 6428 6429 // thisMBB: 6430 // ... 6431 // fallthrough --> loopMBB 6432 BB->addSuccessor(loopMBB); 6433 6434 // loopMBB: 6435 // ldrex dest, ptr 6436 // (sign extend dest, if required) 6437 // cmp dest, incr 6438 // cmov.cond scratch2, incr, dest 6439 // strex scratch, scratch2, ptr 6440 // cmp scratch, #0 6441 // bne- loopMBB 6442 // fallthrough --> exitMBB 6443 BB = loopMBB; 6444 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 6445 if (ldrOpc == ARM::t2LDREX) 6446 MIB.addImm(0); 6447 AddDefaultPred(MIB); 6448 6449 // Sign extend the value, if necessary. 6450 if (signExtend && extendOpc) { 6451 oldval = MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass 6452 : &ARM::GPRnopcRegClass); 6453 if (!isThumb2) 6454 MRI.constrainRegClass(dest, &ARM::GPRnopcRegClass); 6455 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 6456 .addReg(dest) 6457 .addImm(0)); 6458 } 6459 6460 // Build compare and cmov instructions. 6461 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6462 .addReg(oldval).addReg(incr)); 6463 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 6464 .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR); 6465 6466 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 6467 if (strOpc == ARM::t2STREX) 6468 MIB.addImm(0); 6469 AddDefaultPred(MIB); 6470 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6471 .addReg(scratch).addImm(0)); 6472 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6473 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6474 6475 BB->addSuccessor(loopMBB); 6476 BB->addSuccessor(exitMBB); 6477 6478 // exitMBB: 6479 // ... 6480 BB = exitMBB; 6481 6482 MI->eraseFromParent(); // The instruction is gone now. 6483 6484 return BB; 6485 } 6486 6487 MachineBasicBlock * 6488 ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 6489 unsigned Op1, unsigned Op2, 6490 bool NeedsCarry, bool IsCmpxchg, 6491 bool IsMinMax, ARMCC::CondCodes CC) const { 6492 // This also handles ATOMIC_SWAP and ATOMIC_STORE, indicated by Op1==0. 6493 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6494 6495 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6496 MachineFunction *MF = BB->getParent(); 6497 MachineFunction::iterator It = BB; 6498 ++It; 6499 6500 bool isStore = (MI->getOpcode() == ARM::ATOMIC_STORE_I64); 6501 unsigned offset = (isStore ? -2 : 0); 6502 unsigned destlo = MI->getOperand(0).getReg(); 6503 unsigned desthi = MI->getOperand(1).getReg(); 6504 unsigned ptr = MI->getOperand(offset+2).getReg(); 6505 unsigned vallo = MI->getOperand(offset+3).getReg(); 6506 unsigned valhi = MI->getOperand(offset+4).getReg(); 6507 unsigned OrdIdx = offset + (IsCmpxchg ? 
7 : 5); 6508 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(OrdIdx).getImm()); 6509 DebugLoc dl = MI->getDebugLoc(); 6510 bool isThumb2 = Subtarget->isThumb2(); 6511 6512 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6513 if (isThumb2) { 6514 MRI.constrainRegClass(destlo, &ARM::rGPRRegClass); 6515 MRI.constrainRegClass(desthi, &ARM::rGPRRegClass); 6516 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 6517 MRI.constrainRegClass(vallo, &ARM::rGPRRegClass); 6518 MRI.constrainRegClass(valhi, &ARM::rGPRRegClass); 6519 } 6520 6521 unsigned ldrOpc, strOpc; 6522 getExclusiveOperation(8, Ord, isThumb2, ldrOpc, strOpc); 6523 6524 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6525 MachineBasicBlock *contBB = 0, *cont2BB = 0; 6526 if (IsCmpxchg || IsMinMax) 6527 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 6528 if (IsCmpxchg) 6529 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 6530 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6531 6532 MF->insert(It, loopMBB); 6533 if (IsCmpxchg || IsMinMax) MF->insert(It, contBB); 6534 if (IsCmpxchg) MF->insert(It, cont2BB); 6535 MF->insert(It, exitMBB); 6536 6537 // Transfer the remainder of BB and its successor edges to exitMBB. 6538 exitMBB->splice(exitMBB->begin(), BB, 6539 llvm::next(MachineBasicBlock::iterator(MI)), 6540 BB->end()); 6541 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6542 6543 const TargetRegisterClass *TRC = isThumb2 ? 6544 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6545 (const TargetRegisterClass*)&ARM::GPRRegClass; 6546 unsigned storesuccess = MRI.createVirtualRegister(TRC); 6547 6548 // thisMBB: 6549 // ... 6550 // fallthrough --> loopMBB 6551 BB->addSuccessor(loopMBB); 6552 6553 // loopMBB: 6554 // ldrexd r2, r3, ptr 6555 // <binopa> r0, r2, incr 6556 // <binopb> r1, r3, incr 6557 // strexd storesuccess, r0, r1, ptr 6558 // cmp storesuccess, #0 6559 // bne- loopMBB 6560 // fallthrough --> exitMBB 6561 BB = loopMBB; 6562 6563 if (!isStore) { 6564 // Load 6565 if (isThumb2) { 6566 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 6567 .addReg(destlo, RegState::Define) 6568 .addReg(desthi, RegState::Define) 6569 .addReg(ptr)); 6570 } else { 6571 unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6572 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc)) 6573 .addReg(GPRPair0, RegState::Define).addReg(ptr)); 6574 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 6575 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo) 6576 .addReg(GPRPair0, 0, ARM::gsub_0); 6577 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi) 6578 .addReg(GPRPair0, 0, ARM::gsub_1); 6579 } 6580 } 6581 6582 unsigned StoreLo, StoreHi; 6583 if (IsCmpxchg) { 6584 // Add early exit 6585 for (unsigned i = 0; i < 2; i++) { 6586 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 6587 ARM::CMPrr)) 6588 .addReg(i == 0 ? destlo : desthi) 6589 .addReg(i == 0 ? vallo : valhi)); 6590 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6591 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6592 BB->addSuccessor(exitMBB); 6593 BB->addSuccessor(i == 0 ? contBB : cont2BB); 6594 BB = (i == 0 ? 
contBB : cont2BB); 6595 } 6596 6597 // Copy to physregs for strexd 6598 StoreLo = MI->getOperand(5).getReg(); 6599 StoreHi = MI->getOperand(6).getReg(); 6600 } else if (Op1) { 6601 // Perform binary operation 6602 unsigned tmpRegLo = MRI.createVirtualRegister(TRC); 6603 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), tmpRegLo) 6604 .addReg(destlo).addReg(vallo)) 6605 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 6606 unsigned tmpRegHi = MRI.createVirtualRegister(TRC); 6607 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), tmpRegHi) 6608 .addReg(desthi).addReg(valhi)) 6609 .addReg(IsMinMax ? ARM::CPSR : 0, getDefRegState(IsMinMax)); 6610 6611 StoreLo = tmpRegLo; 6612 StoreHi = tmpRegHi; 6613 } else { 6614 // Copy to physregs for strexd 6615 StoreLo = vallo; 6616 StoreHi = valhi; 6617 } 6618 if (IsMinMax) { 6619 // Compare and branch to exit block. 6620 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6621 .addMBB(exitMBB).addImm(CC).addReg(ARM::CPSR); 6622 BB->addSuccessor(exitMBB); 6623 BB->addSuccessor(contBB); 6624 BB = contBB; 6625 StoreLo = vallo; 6626 StoreHi = valhi; 6627 } 6628 6629 // Store 6630 if (isThumb2) { 6631 MRI.constrainRegClass(StoreLo, &ARM::rGPRRegClass); 6632 MRI.constrainRegClass(StoreHi, &ARM::rGPRRegClass); 6633 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 6634 .addReg(StoreLo).addReg(StoreHi).addReg(ptr)); 6635 } else { 6636 // Marshal a pair... 6637 unsigned StorePair = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6638 unsigned UndefPair = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6639 unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6640 BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), UndefPair); 6641 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1) 6642 .addReg(UndefPair) 6643 .addReg(StoreLo) 6644 .addImm(ARM::gsub_0); 6645 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), StorePair) 6646 .addReg(r1) 6647 .addReg(StoreHi) 6648 .addImm(ARM::gsub_1); 6649 6650 // ...and store it 6651 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), storesuccess) 6652 .addReg(StorePair).addReg(ptr)); 6653 } 6654 // Cmp+jump 6655 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6656 .addReg(storesuccess).addImm(0)); 6657 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6658 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6659 6660 BB->addSuccessor(loopMBB); 6661 BB->addSuccessor(exitMBB); 6662 6663 // exitMBB: 6664 // ... 6665 BB = exitMBB; 6666 6667 MI->eraseFromParent(); // The instruction is gone now. 
6668 6669 return BB; 6670 } 6671 6672 MachineBasicBlock * 6673 ARMTargetLowering::EmitAtomicLoad64(MachineInstr *MI, MachineBasicBlock *BB) const { 6674 6675 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6676 6677 unsigned destlo = MI->getOperand(0).getReg(); 6678 unsigned desthi = MI->getOperand(1).getReg(); 6679 unsigned ptr = MI->getOperand(2).getReg(); 6680 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm()); 6681 DebugLoc dl = MI->getDebugLoc(); 6682 bool isThumb2 = Subtarget->isThumb2(); 6683 6684 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6685 if (isThumb2) { 6686 MRI.constrainRegClass(destlo, &ARM::rGPRRegClass); 6687 MRI.constrainRegClass(desthi, &ARM::rGPRRegClass); 6688 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 6689 } 6690 unsigned ldrOpc, strOpc; 6691 getExclusiveOperation(8, Ord, isThumb2, ldrOpc, strOpc); 6692 6693 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(ldrOpc)); 6694 6695 if (isThumb2) { 6696 MIB.addReg(destlo, RegState::Define) 6697 .addReg(desthi, RegState::Define) 6698 .addReg(ptr); 6699 6700 } else { 6701 unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6702 MIB.addReg(GPRPair0, RegState::Define).addReg(ptr); 6703 6704 // Copy GPRPair0 into dest. (This copy will normally be coalesced.) 6705 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), destlo) 6706 .addReg(GPRPair0, 0, ARM::gsub_0); 6707 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), desthi) 6708 .addReg(GPRPair0, 0, ARM::gsub_1); 6709 } 6710 AddDefaultPred(MIB); 6711 6712 MI->eraseFromParent(); // The instruction is gone now. 6713 6714 return BB; 6715 } 6716 6717 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 6718 /// registers the function context. 6719 void ARMTargetLowering:: 6720 SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 6721 MachineBasicBlock *DispatchBB, int FI) const { 6722 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6723 DebugLoc dl = MI->getDebugLoc(); 6724 MachineFunction *MF = MBB->getParent(); 6725 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6726 MachineConstantPool *MCP = MF->getConstantPool(); 6727 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 6728 const Function *F = MF->getFunction(); 6729 6730 bool isThumb = Subtarget->isThumb(); 6731 bool isThumb2 = Subtarget->isThumb2(); 6732 6733 unsigned PCLabelId = AFI->createPICLabelUId(); 6734 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 6735 ARMConstantPoolValue *CPV = 6736 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 6737 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 6738 6739 const TargetRegisterClass *TRC = isThumb ? 6740 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6741 (const TargetRegisterClass*)&ARM::GPRRegClass; 6742 6743 // Grab constant pool and fixed stack memory operands. 6744 MachineMemOperand *CPMMO = 6745 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 6746 MachineMemOperand::MOLoad, 4, 4); 6747 6748 MachineMemOperand *FIMMOSt = 6749 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6750 MachineMemOperand::MOStore, 4, 4); 6751 6752 // Load the address of the dispatch MBB into the jump buffer. 
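// Each path below forms the address PC-relatively from the constant pool
// entry created above; the Thumb variants also set the address's low bit so
// the nonlocal jump through the buffer resumes in Thumb state.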
6753 if (isThumb2) { 6754 // Incoming value: jbuf 6755 // ldr.n r5, LCPI1_1 6756 // orr r5, r5, #1 6757 // add r5, pc 6758 // str r5, [$jbuf, #+4] ; &jbuf[1] 6759 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6760 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 6761 .addConstantPoolIndex(CPI) 6762 .addMemOperand(CPMMO)); 6763 // Set the low bit because of thumb mode. 6764 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6765 AddDefaultCC( 6766 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 6767 .addReg(NewVReg1, RegState::Kill) 6768 .addImm(0x01))); 6769 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6770 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 6771 .addReg(NewVReg2, RegState::Kill) 6772 .addImm(PCLabelId); 6773 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 6774 .addReg(NewVReg3, RegState::Kill) 6775 .addFrameIndex(FI) 6776 .addImm(36) // &jbuf[1] :: pc 6777 .addMemOperand(FIMMOSt)); 6778 } else if (isThumb) { 6779 // Incoming value: jbuf 6780 // ldr.n r1, LCPI1_4 6781 // add r1, pc 6782 // mov r2, #1 6783 // orrs r1, r2 6784 // add r2, $jbuf, #+4 ; &jbuf[1] 6785 // str r1, [r2] 6786 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6787 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 6788 .addConstantPoolIndex(CPI) 6789 .addMemOperand(CPMMO)); 6790 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6791 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 6792 .addReg(NewVReg1, RegState::Kill) 6793 .addImm(PCLabelId); 6794 // Set the low bit because of thumb mode. 6795 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6796 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 6797 .addReg(ARM::CPSR, RegState::Define) 6798 .addImm(1)); 6799 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6800 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 6801 .addReg(ARM::CPSR, RegState::Define) 6802 .addReg(NewVReg2, RegState::Kill) 6803 .addReg(NewVReg3, RegState::Kill)); 6804 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6805 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 6806 .addFrameIndex(FI) 6807 .addImm(36)); // &jbuf[1] :: pc 6808 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 6809 .addReg(NewVReg4, RegState::Kill) 6810 .addReg(NewVReg5, RegState::Kill) 6811 .addImm(0) 6812 .addMemOperand(FIMMOSt)); 6813 } else { 6814 // Incoming value: jbuf 6815 // ldr r1, LCPI1_1 6816 // add r1, pc, r1 6817 // str r1, [$jbuf, #+4] ; &jbuf[1] 6818 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6819 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 6820 .addConstantPoolIndex(CPI) 6821 .addImm(0) 6822 .addMemOperand(CPMMO)); 6823 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6824 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 6825 .addReg(NewVReg1, RegState::Kill) 6826 .addImm(PCLabelId)); 6827 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 6828 .addReg(NewVReg2, RegState::Kill) 6829 .addFrameIndex(FI) 6830 .addImm(36) // &jbuf[1] :: pc 6831 .addMemOperand(FIMMOSt)); 6832 } 6833 } 6834 6835 MachineBasicBlock *ARMTargetLowering:: 6836 EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 6837 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6838 DebugLoc dl = MI->getDebugLoc(); 6839 MachineFunction *MF = MBB->getParent(); 6840 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6841 ARMFunctionInfo *AFI = 
MF->getInfo<ARMFunctionInfo>();
6842 MachineFrameInfo *MFI = MF->getFrameInfo();
6843 int FI = MFI->getFunctionContextIndex();
6844
6845 const TargetRegisterClass *TRC = Subtarget->isThumb() ?
6846 (const TargetRegisterClass*)&ARM::tGPRRegClass :
6847 (const TargetRegisterClass*)&ARM::GPRnopcRegClass;
6848
6849 // Get a mapping of the call site numbers to all of the landing pads they're
6850 // associated with.
6851 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad;
6852 unsigned MaxCSNum = 0;
6853 MachineModuleInfo &MMI = MF->getMMI();
6854 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
6855 ++BB) {
6856 if (!BB->isLandingPad()) continue;
6857
6858 // FIXME: We should assert that the EH_LABEL is the first MI in the landing
6859 // pad.
6860 for (MachineBasicBlock::iterator
6861 II = BB->begin(), IE = BB->end(); II != IE; ++II) {
6862 if (!II->isEHLabel()) continue;
6863
6864 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
6865 if (!MMI.hasCallSiteLandingPad(Sym)) continue;
6866
6867 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym);
6868 for (SmallVectorImpl<unsigned>::iterator
6869 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
6870 CSI != CSE; ++CSI) {
6871 CallSiteNumToLPad[*CSI].push_back(BB);
6872 MaxCSNum = std::max(MaxCSNum, *CSI);
6873 }
6874 break;
6875 }
6876 }
6877
6878 // Get an ordered list of the machine basic blocks for the jump table.
6879 std::vector<MachineBasicBlock*> LPadList;
6880 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs;
6881 LPadList.reserve(CallSiteNumToLPad.size());
6882 for (unsigned I = 1; I <= MaxCSNum; ++I) {
6883 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
6884 for (SmallVectorImpl<MachineBasicBlock*>::iterator
6885 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
6886 LPadList.push_back(*II);
6887 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
6888 }
6889 }
6890
6891 assert(!LPadList.empty() &&
6892 "No landing pad destinations for the dispatch jump table!");
6893
6894 // Create the jump table and associated information.
6895 MachineJumpTableInfo *JTI =
6896 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
6897 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
6898 unsigned UId = AFI->createJumpTableUId();
6899 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
6900
6901 // Create the MBBs for the dispatch code.
6902
6903 // Shove the dispatch's address into the return slot in the function context.
6904 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
6905 DispatchBB->setIsLandingPad();
6906
6907 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
6908 unsigned trap_opcode;
6909 if (Subtarget->isThumb())
6910 trap_opcode = ARM::tTRAP;
6911 else
6912 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
6913
6914 BuildMI(TrapBB, dl, TII->get(trap_opcode));
6915 DispatchBB->addSuccessor(TrapBB);
6916
6917 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
6918 DispatchBB->addSuccessor(DispContBB);
6919
6920 // Insert the MBBs.
6921 MF->insert(MF->end(), DispatchBB);
6922 MF->insert(MF->end(), DispContBB);
6923 MF->insert(MF->end(), TrapBB);
6924
6925 // Insert code into the entry block that creates and registers the function
6926 // context.
6927 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 6928 6929 MachineMemOperand *FIMMOLd = 6930 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6931 MachineMemOperand::MOLoad | 6932 MachineMemOperand::MOVolatile, 4, 4); 6933 6934 MachineInstrBuilder MIB; 6935 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 6936 6937 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 6938 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 6939 6940 // Add a register mask with no preserved registers. This results in all 6941 // registers being marked as clobbered. 6942 MIB.addRegMask(RI.getNoPreservedMask()); 6943 6944 unsigned NumLPads = LPadList.size(); 6945 if (Subtarget->isThumb2()) { 6946 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6947 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 6948 .addFrameIndex(FI) 6949 .addImm(4) 6950 .addMemOperand(FIMMOLd)); 6951 6952 if (NumLPads < 256) { 6953 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 6954 .addReg(NewVReg1) 6955 .addImm(LPadList.size())); 6956 } else { 6957 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6958 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 6959 .addImm(NumLPads & 0xFFFF)); 6960 6961 unsigned VReg2 = VReg1; 6962 if ((NumLPads & 0xFFFF0000) != 0) { 6963 VReg2 = MRI->createVirtualRegister(TRC); 6964 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 6965 .addReg(VReg1) 6966 .addImm(NumLPads >> 16)); 6967 } 6968 6969 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 6970 .addReg(NewVReg1) 6971 .addReg(VReg2)); 6972 } 6973 6974 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 6975 .addMBB(TrapBB) 6976 .addImm(ARMCC::HI) 6977 .addReg(ARM::CPSR); 6978 6979 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6980 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 6981 .addJumpTableIndex(MJTI) 6982 .addImm(UId)); 6983 6984 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6985 AddDefaultCC( 6986 AddDefaultPred( 6987 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 6988 .addReg(NewVReg3, RegState::Kill) 6989 .addReg(NewVReg1) 6990 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6991 6992 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 6993 .addReg(NewVReg4, RegState::Kill) 6994 .addReg(NewVReg1) 6995 .addJumpTableIndex(MJTI) 6996 .addImm(UId); 6997 } else if (Subtarget->isThumb()) { 6998 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6999 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 7000 .addFrameIndex(FI) 7001 .addImm(1) 7002 .addMemOperand(FIMMOLd)); 7003 7004 if (NumLPads < 256) { 7005 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 7006 .addReg(NewVReg1) 7007 .addImm(NumLPads)); 7008 } else { 7009 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7010 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7011 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7012 7013 // MachineConstantPool wants an explicit alignment. 
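// Prefer the data layout's recorded alignment for i32; if none is
// recorded, fall back to the type's allocation size below.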
7014 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 7015 if (Align == 0) 7016 Align = getDataLayout()->getTypeAllocSize(C->getType()); 7017 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7018 7019 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7020 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 7021 .addReg(VReg1, RegState::Define) 7022 .addConstantPoolIndex(Idx)); 7023 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 7024 .addReg(NewVReg1) 7025 .addReg(VReg1)); 7026 } 7027 7028 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 7029 .addMBB(TrapBB) 7030 .addImm(ARMCC::HI) 7031 .addReg(ARM::CPSR); 7032 7033 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 7034 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 7035 .addReg(ARM::CPSR, RegState::Define) 7036 .addReg(NewVReg1) 7037 .addImm(2)); 7038 7039 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7040 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 7041 .addJumpTableIndex(MJTI) 7042 .addImm(UId)); 7043 7044 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7045 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 7046 .addReg(ARM::CPSR, RegState::Define) 7047 .addReg(NewVReg2, RegState::Kill) 7048 .addReg(NewVReg3)); 7049 7050 MachineMemOperand *JTMMOLd = 7051 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 7052 MachineMemOperand::MOLoad, 4, 4); 7053 7054 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7055 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 7056 .addReg(NewVReg4, RegState::Kill) 7057 .addImm(0) 7058 .addMemOperand(JTMMOLd)); 7059 7060 unsigned NewVReg6 = NewVReg5; 7061 if (RelocM == Reloc::PIC_) { 7062 NewVReg6 = MRI->createVirtualRegister(TRC); 7063 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 7064 .addReg(ARM::CPSR, RegState::Define) 7065 .addReg(NewVReg5, RegState::Kill) 7066 .addReg(NewVReg3)); 7067 } 7068 7069 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 7070 .addReg(NewVReg6, RegState::Kill) 7071 .addJumpTableIndex(MJTI) 7072 .addImm(UId); 7073 } else { 7074 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 7075 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 7076 .addFrameIndex(FI) 7077 .addImm(4) 7078 .addMemOperand(FIMMOLd)); 7079 7080 if (NumLPads < 256) { 7081 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 7082 .addReg(NewVReg1) 7083 .addImm(NumLPads)); 7084 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 7085 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7086 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 7087 .addImm(NumLPads & 0xFFFF)); 7088 7089 unsigned VReg2 = VReg1; 7090 if ((NumLPads & 0xFFFF0000) != 0) { 7091 VReg2 = MRI->createVirtualRegister(TRC); 7092 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 7093 .addReg(VReg1) 7094 .addImm(NumLPads >> 16)); 7095 } 7096 7097 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 7098 .addReg(NewVReg1) 7099 .addReg(VReg2)); 7100 } else { 7101 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7102 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7103 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 7104 7105 // MachineConstantPool wants an explicit alignment. 
7106 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 7107 if (Align == 0) 7108 Align = getDataLayout()->getTypeAllocSize(C->getType()); 7109 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7110 7111 unsigned VReg1 = MRI->createVirtualRegister(TRC); 7112 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 7113 .addReg(VReg1, RegState::Define) 7114 .addConstantPoolIndex(Idx) 7115 .addImm(0)); 7116 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 7117 .addReg(NewVReg1) 7118 .addReg(VReg1, RegState::Kill)); 7119 } 7120 7121 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 7122 .addMBB(TrapBB) 7123 .addImm(ARMCC::HI) 7124 .addReg(ARM::CPSR); 7125 7126 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 7127 AddDefaultCC( 7128 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 7129 .addReg(NewVReg1) 7130 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 7131 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 7132 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 7133 .addJumpTableIndex(MJTI) 7134 .addImm(UId)); 7135 7136 MachineMemOperand *JTMMOLd = 7137 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 7138 MachineMemOperand::MOLoad, 4, 4); 7139 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 7140 AddDefaultPred( 7141 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 7142 .addReg(NewVReg3, RegState::Kill) 7143 .addReg(NewVReg4) 7144 .addImm(0) 7145 .addMemOperand(JTMMOLd)); 7146 7147 if (RelocM == Reloc::PIC_) { 7148 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 7149 .addReg(NewVReg5, RegState::Kill) 7150 .addReg(NewVReg4) 7151 .addJumpTableIndex(MJTI) 7152 .addImm(UId); 7153 } else { 7154 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 7155 .addReg(NewVReg5, RegState::Kill) 7156 .addJumpTableIndex(MJTI) 7157 .addImm(UId); 7158 } 7159 } 7160 7161 // Add the jump table entries as successors to the MBB. 7162 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 7163 for (std::vector<MachineBasicBlock*>::iterator 7164 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 7165 MachineBasicBlock *CurMBB = *I; 7166 if (SeenMBBs.insert(CurMBB)) 7167 DispContBB->addSuccessor(CurMBB); 7168 } 7169 7170 // N.B. the order the invoke BBs are processed in doesn't matter here. 7171 const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF); 7172 SmallVector<MachineBasicBlock*, 64> MBBLPads; 7173 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 7174 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 7175 MachineBasicBlock *BB = *I; 7176 7177 // Remove the landing pad successor from the invoke block and replace it 7178 // with the new dispatch block. 7179 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 7180 BB->succ_end()); 7181 while (!Successors.empty()) { 7182 MachineBasicBlock *SMBB = Successors.pop_back_val(); 7183 if (SMBB->isLandingPad()) { 7184 BB->removeSuccessor(SMBB); 7185 MBBLPads.push_back(SMBB); 7186 } 7187 } 7188 7189 BB->addSuccessor(DispatchBB); 7190 7191 // Find the invoke call and mark all of the callee-saved registers as 7192 // 'implicit defined' so that they're spilled. This prevents code from 7193 // moving instructions to before the EH block, where they will never be 7194 // executed. 
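// (The block is scanned in reverse so the first call found is the invoke's
// call instruction; any callee-saved register it does not already define
// gets an implicit dead def.)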
7195 for (MachineBasicBlock::reverse_iterator
7196 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
7197 if (!II->isCall()) continue;
7198
7199 DenseMap<unsigned, bool> DefRegs;
7200 for (MachineInstr::mop_iterator
7201 OI = II->operands_begin(), OE = II->operands_end();
7202 OI != OE; ++OI) {
7203 if (!OI->isReg()) continue;
7204 DefRegs[OI->getReg()] = true;
7205 }
7206
7207 MachineInstrBuilder MIB(*MF, &*II);
7208
7209 for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
7210 unsigned Reg = SavedRegs[i];
7211 if (Subtarget->isThumb2() &&
7212 !ARM::tGPRRegClass.contains(Reg) &&
7213 !ARM::hGPRRegClass.contains(Reg))
7214 continue;
7215 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
7216 continue;
7217 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
7218 continue;
7219 if (!DefRegs[Reg])
7220 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
7221 }
7222
7223 break;
7224 }
7225 }
7226
7227 // Mark all former landing pads as non-landing pads. The dispatch is the only
7228 // landing pad now.
7229 for (SmallVectorImpl<MachineBasicBlock*>::iterator
7230 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
7231 (*I)->setIsLandingPad(false);
7232
7233 // The instruction is gone now.
7234 MI->eraseFromParent();
7235
7236 return MBB;
7237 }
7238
7239 static
7240 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
7241 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
7242 E = MBB->succ_end(); I != E; ++I)
7243 if (*I != Succ)
7244 return *I;
7245 llvm_unreachable("Expecting a BB with two successors!");
7246 }
7247
7248 MachineBasicBlock *ARMTargetLowering::
7249 EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
7250 // This pseudo instruction has 4 operands: dst, src, size, alignment
7251 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
7252 // Otherwise, we will generate unrolled scalar copies.
7253 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
7254 const BasicBlock *LLVM_BB = BB->getBasicBlock();
7255 MachineFunction::iterator It = BB;
7256 ++It;
7257
7258 unsigned dest = MI->getOperand(0).getReg();
7259 unsigned src = MI->getOperand(1).getReg();
7260 unsigned SizeVal = MI->getOperand(2).getImm();
7261 unsigned Align = MI->getOperand(3).getImm();
7262 DebugLoc dl = MI->getDebugLoc();
7263
7264 bool isThumb2 = Subtarget->isThumb2();
7265 MachineFunction *MF = BB->getParent();
7266 MachineRegisterInfo &MRI = MF->getRegInfo();
7267 unsigned ldrOpc, strOpc, UnitSize = 0;
7268
7269 const TargetRegisterClass *TRC = isThumb2 ?
7270 (const TargetRegisterClass*)&ARM::tGPRRegClass :
7271 (const TargetRegisterClass*)&ARM::GPRRegClass;
7272 const TargetRegisterClass *TRC_Vec = 0;
7273
7274 if (Align & 1) {
7275 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
7276 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM;
7277 UnitSize = 1;
7278 } else if (Align & 2) {
7279 ldrOpc = isThumb2 ? ARM::t2LDRH_POST : ARM::LDRH_POST;
7280 strOpc = isThumb2 ? ARM::t2STRH_POST : ARM::STRH_POST;
7281 UnitSize = 2;
7282 } else {
7283 // Check whether we can use NEON instructions.
7284 if (!MF->getFunction()->getAttributes().
7285 hasAttribute(AttributeSet::FunctionIndex, 7286 Attribute::NoImplicitFloat) && 7287 Subtarget->hasNEON()) { 7288 if ((Align % 16 == 0) && SizeVal >= 16) { 7289 ldrOpc = ARM::VLD1q32wb_fixed; 7290 strOpc = ARM::VST1q32wb_fixed; 7291 UnitSize = 16; 7292 TRC_Vec = (const TargetRegisterClass*)&ARM::DPairRegClass; 7293 } 7294 else if ((Align % 8 == 0) && SizeVal >= 8) { 7295 ldrOpc = ARM::VLD1d32wb_fixed; 7296 strOpc = ARM::VST1d32wb_fixed; 7297 UnitSize = 8; 7298 TRC_Vec = (const TargetRegisterClass*)&ARM::DPRRegClass; 7299 } 7300 } 7301 // Can't use NEON instructions. 7302 if (UnitSize == 0) { 7303 ldrOpc = isThumb2 ? ARM::t2LDR_POST : ARM::LDR_POST_IMM; 7304 strOpc = isThumb2 ? ARM::t2STR_POST : ARM::STR_POST_IMM; 7305 UnitSize = 4; 7306 } 7307 } 7308 7309 unsigned BytesLeft = SizeVal % UnitSize; 7310 unsigned LoopSize = SizeVal - BytesLeft; 7311 7312 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 7313 // Use LDR and STR to copy. 7314 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 7315 // [destOut] = STR_POST(scratch, destIn, UnitSize) 7316 unsigned srcIn = src; 7317 unsigned destIn = dest; 7318 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 7319 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC); 7320 unsigned srcOut = MRI.createVirtualRegister(TRC); 7321 unsigned destOut = MRI.createVirtualRegister(TRC); 7322 if (UnitSize >= 8) { 7323 AddDefaultPred(BuildMI(*BB, MI, dl, 7324 TII->get(ldrOpc), scratch) 7325 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(0)); 7326 7327 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 7328 .addReg(destIn).addImm(0).addReg(scratch)); 7329 } else if (isThumb2) { 7330 AddDefaultPred(BuildMI(*BB, MI, dl, 7331 TII->get(ldrOpc), scratch) 7332 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(UnitSize)); 7333 7334 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 7335 .addReg(scratch).addReg(destIn) 7336 .addImm(UnitSize)); 7337 } else { 7338 AddDefaultPred(BuildMI(*BB, MI, dl, 7339 TII->get(ldrOpc), scratch) 7340 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0) 7341 .addImm(UnitSize)); 7342 7343 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 7344 .addReg(scratch).addReg(destIn) 7345 .addReg(0).addImm(UnitSize)); 7346 } 7347 srcIn = srcOut; 7348 destIn = destOut; 7349 } 7350 7351 // Handle the leftover bytes with LDRB and STRB. 7352 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 7353 // [destOut] = STRB_POST(scratch, destIn, 1) 7354 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 7355 strOpc = isThumb2 ? 
             ARM::t2STRB_POST : ARM::STRB_POST_IMM;
    for (unsigned i = 0; i < BytesLeft; i++) {
      unsigned scratch = MRI.createVirtualRegister(TRC);
      unsigned srcOut = MRI.createVirtualRegister(TRC);
      unsigned destOut = MRI.createVirtualRegister(TRC);
      if (isThumb2) {
        AddDefaultPred(BuildMI(*BB, MI, dl,
          TII->get(ldrOpc), scratch)
          .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));

        // t2STRB_POST takes no register-offset operand, only the immediate.
        AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
          .addReg(scratch).addReg(destIn)
          .addImm(1));
      } else {
        AddDefaultPred(BuildMI(*BB, MI, dl,
          TII->get(ldrOpc), scratch)
          .addReg(srcOut, RegState::Define).addReg(srcIn)
          .addReg(0).addImm(1));

        AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut)
          .addReg(scratch).addReg(destIn)
          .addReg(0).addImm(1));
      }
      srcIn = srcOut;
      destIn = destOut;
    }
    MI->eraseFromParent();   // The instruction is gone now.
    return BB;
  }

  // Expand the pseudo op to a loop.
  // thisMBB:
  //   ...
  //   movw varEnd, #        --> with thumb2
  //   movt varEnd, #
  //   ldrcp varEnd, idx     --> without thumb2
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate to varEnd.
  unsigned varEnd = MRI.createVirtualRegister(TRC);
  if (isThumb2) {
    unsigned VReg1 = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      VReg1 = MRI.createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), VReg1)
                   .addImm(LoopSize & 0xFFFF));

    if ((LoopSize & 0xFFFF0000) != 0)
      AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd)
                     .addReg(VReg1)
                     .addImm(LoopSize >> 16));
  } else {
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
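    // Note: without Thumb2 we cannot assume MOVW/MOVT are available (they
    // require v6T2), so the loop count is materialized below with a
    // constant-pool load (LDRcp) instead of a movw/movt pair.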
    unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
    if (Align == 0)
      Align = getDataLayout()->getTypeAllocSize(C->getType());
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

    AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp))
                   .addReg(varEnd, RegState::Define)
                   .addConstantPoolIndex(Idx)
                   .addImm(0));
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  unsigned varLoop = MRI.createVirtualRegister(TRC);
  unsigned varPhi = MRI.createVirtualRegister(TRC);
  unsigned srcLoop = MRI.createVirtualRegister(TRC);
  unsigned srcPhi = MRI.createVirtualRegister(TRC);
  unsigned destLoop = MRI.createVirtualRegister(TRC);
  unsigned destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
    .addReg(varLoop).addMBB(loopMBB)
    .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
    .addReg(srcLoop).addMBB(loopMBB)
    .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
    .addReg(destLoop).addMBB(loopMBB)
    .addReg(dest).addMBB(entryBB);

  // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC);
  if (UnitSize >= 8) {
    AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
      .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(0));

    AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
      .addReg(destPhi).addImm(0).addReg(scratch));
  } else if (isThumb2) {
    AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
      .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(UnitSize));

    AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
      .addReg(scratch).addReg(destPhi)
      .addImm(UnitSize));
  } else {
    AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch)
      .addReg(srcLoop, RegState::Define).addReg(srcPhi).addReg(0)
      .addImm(UnitSize));

    AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop)
      .addReg(scratch).addReg(destPhi)
      .addReg(0).addImm(UnitSize));
  }

  // Decrement loop variable by UnitSize.
  MachineInstrBuilder MIB = BuildMI(BB, dl,
    TII->get(isThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
  AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
  MIB->getOperand(5).setReg(ARM::CPSR);
  MIB->getOperand(5).setIsDef(true);

  BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  MachineInstr *StartOfExit = exitMBB->begin();
  ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM;
  strOpc = isThumb2 ?
           ARM::t2STRB_POST : ARM::STRB_POST_IMM;

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    unsigned scratch = MRI.createVirtualRegister(TRC);
    unsigned srcOut = MRI.createVirtualRegister(TRC);
    unsigned destOut = MRI.createVirtualRegister(TRC);
    if (isThumb2) {
      AddDefaultPred(BuildMI(*BB, StartOfExit, dl,
        TII->get(ldrOpc), scratch)
        .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1));

      AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut)
        .addReg(scratch).addReg(destIn)
        .addImm(1));
    } else {
      AddDefaultPred(BuildMI(*BB, StartOfExit, dl,
        TII->get(ldrOpc), scratch)
        .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1));

      AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut)
        .addReg(scratch).addReg(destIn)
        .addReg(0).addImm(1));
    }
    srcIn = srcOut;
    destIn = destOut;
  }

  MI->eraseFromParent();   // The instruction is gone now.
  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();
  switch (MI->getOpcode()) {
  default: {
    MI->dump();
    llvm_unreachable("Unexpected instr type to insert");
  }
  // The Thumb2 pre-indexed stores have the same MI operands; they are just
  // defined differently in the .td files than the isel patterns expect, so
  // they need pseudos.
  case ARM::t2STR_preidx:
    MI->setDesc(TII->get(ARM::t2STR_PRE));
    return BB;
  case ARM::t2STRB_preidx:
    MI->setDesc(TII->get(ARM::t2STRB_PRE));
    return BB;
  case ARM::t2STRH_preidx:
    MI->setDesc(TII->get(ARM::t2STRH_PRE));
    return BB;

  case ARM::STRi_preidx:
  case ARM::STRBi_preidx: {
    unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
      ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
    // Decode the offset.
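    // The AM2 offset operand packs both the add/sub direction and the
    // immediate into a single value; ARM_AM::getAM2Op and getAM2Offset
    // unpack it so the *_PRE_IMM form can take a plain signed immediate.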
    unsigned Offset = MI->getOperand(4).getImm();
    bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
    Offset = ARM_AM::getAM2Offset(Offset);
    if (isSub)
      Offset = -Offset;

    MachineMemOperand *MMO = *MI->memoperands_begin();
    BuildMI(*BB, MI, dl, TII->get(NewOpc))
      .addOperand(MI->getOperand(0))  // Rn_wb
      .addOperand(MI->getOperand(1))  // Rt
      .addOperand(MI->getOperand(2))  // Rn
      .addImm(Offset)                 // offset (skip GPR==zero_reg)
      .addOperand(MI->getOperand(5))  // pred
      .addOperand(MI->getOperand(6))
      .addMemOperand(MMO);
    MI->eraseFromParent();
    return BB;
  }
  case ARM::STRr_preidx:
  case ARM::STRBr_preidx:
  case ARM::STRH_preidx: {
    unsigned NewOpc;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
    case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
    case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
    }
    MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
    for (unsigned i = 0; i < MI->getNumOperands(); ++i)
      MIB.addOperand(MI->getOperand(i));
    MI->eraseFromParent();
    return BB;
  }
  case ARM::ATOMIC_LOAD_ADD_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
  case ARM::ATOMIC_LOAD_ADD_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
  case ARM::ATOMIC_LOAD_ADD_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);

  case ARM::ATOMIC_LOAD_AND_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
  case ARM::ATOMIC_LOAD_AND_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
  case ARM::ATOMIC_LOAD_AND_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);

  case ARM::ATOMIC_LOAD_OR_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
  case ARM::ATOMIC_LOAD_OR_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
  case ARM::ATOMIC_LOAD_OR_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);

  case ARM::ATOMIC_LOAD_XOR_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
  case ARM::ATOMIC_LOAD_XOR_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
  case ARM::ATOMIC_LOAD_XOR_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);

  case ARM::ATOMIC_LOAD_NAND_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
  case ARM::ATOMIC_LOAD_NAND_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
  case ARM::ATOMIC_LOAD_NAND_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);

  case ARM::ATOMIC_LOAD_SUB_I8:
    return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
  case ARM::ATOMIC_LOAD_SUB_I16:
    return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
  case ARM::ATOMIC_LOAD_SUB_I32:
    return EmitAtomicBinary(MI, BB, 4, isThumb2 ?
                                        ARM::t2SUBrr : ARM::SUBrr);

  case ARM::ATOMIC_LOAD_MIN_I8:
    return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT);
  case ARM::ATOMIC_LOAD_MIN_I16:
    return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT);
  case ARM::ATOMIC_LOAD_MIN_I32:
    return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT);

  case ARM::ATOMIC_LOAD_MAX_I8:
    return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT);
  case ARM::ATOMIC_LOAD_MAX_I16:
    return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT);
  case ARM::ATOMIC_LOAD_MAX_I32:
    return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT);

  case ARM::ATOMIC_LOAD_UMIN_I8:
    return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO);
  case ARM::ATOMIC_LOAD_UMIN_I16:
    return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO);
  case ARM::ATOMIC_LOAD_UMIN_I32:
    return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO);

  case ARM::ATOMIC_LOAD_UMAX_I8:
    return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI);
  case ARM::ATOMIC_LOAD_UMAX_I16:
    return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI);
  case ARM::ATOMIC_LOAD_UMAX_I32:
    return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI);

  case ARM::ATOMIC_SWAP_I8:  return EmitAtomicBinary(MI, BB, 1, 0);
  case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
  case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);

  case ARM::ATOMIC_CMP_SWAP_I8:  return EmitAtomicCmpSwap(MI, BB, 1);
  case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
  case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);

  case ARM::ATOMIC_LOAD_I64:
    return EmitAtomicLoad64(MI, BB);

  case ARM::ATOMIC_LOAD_ADD_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr,
                              isThumb2 ? ARM::t2ADCrr : ARM::ADCrr,
                              /*NeedsCarry*/ true);
  case ARM::ATOMIC_LOAD_SUB_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
                              /*NeedsCarry*/ true);
  case ARM::ATOMIC_LOAD_OR_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr,
                              isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
  case ARM::ATOMIC_LOAD_XOR_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr,
                              isThumb2 ? ARM::t2EORrr : ARM::EORrr);
  case ARM::ATOMIC_LOAD_AND_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
                              isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
  case ARM::ATOMIC_STORE_I64:
  case ARM::ATOMIC_SWAP_I64:
    return EmitAtomicBinary64(MI, BB, 0, 0, false);
  case ARM::ATOMIC_CMP_SWAP_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
                              /*NeedsCarry*/ false, /*IsCmpxchg*/true);
  case ARM::ATOMIC_LOAD_MIN_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
                              /*NeedsCarry*/ true, /*IsCmpxchg*/false,
                              /*IsMinMax*/ true, ARMCC::LT);
  case ARM::ATOMIC_LOAD_MAX_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
                              /*NeedsCarry*/ true, /*IsCmpxchg*/false,
                              /*IsMinMax*/ true, ARMCC::GE);
  case ARM::ATOMIC_LOAD_UMIN_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
                              isThumb2 ?
                              ARM::t2SBCrr : ARM::SBCrr,
                              /*NeedsCarry*/ true, /*IsCmpxchg*/false,
                              /*IsMinMax*/ true, ARMCC::LO);
  case ARM::ATOMIC_LOAD_UMAX_I64:
    return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr,
                              isThumb2 ? ARM::t2SBCrr : ARM::SBCrr,
                              /*NeedsCarry*/ true, /*IsCmpxchg*/false,
                              /*IsMinMax*/ true, ARMCC::HS);

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    llvm::next(MachineBasicBlock::iterator(MI)),
                    BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl,
            TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    MI->eraseFromParent();   // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::BCCi64:
  case ARM::BCCZi64: {
    // If there is an unconditional branch to the other successor, remove it.
    BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end());

    // Compare both parts that make up the double comparison separately for
    // equality.
    bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;

    unsigned LHS1 = MI->getOperand(1).getReg();
    unsigned LHS2 = MI->getOperand(2).getReg();
    if (RHSisZero) {
      AddDefaultPred(BuildMI(BB, dl,
                             TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
                     .addReg(LHS1).addImm(0));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(LHS2).addImm(0)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    } else {
      unsigned RHS1 = MI->getOperand(3).getReg();
      unsigned RHS2 = MI->getOperand(4).getReg();
      AddDefaultPred(BuildMI(BB, dl,
                             TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
                     .addReg(LHS1).addReg(RHS1));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
        .addReg(LHS2).addReg(RHS2)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    }

    MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ?
                                                           3 : 5).getMBB();
    MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
    if (MI->getOperand(0).getImm() == ARMCC::NE)
      std::swap(destMBB, exitMBB);

    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
    if (isThumb2)
      AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB));
    else
      BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);

    MI->eraseFromParent();   // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    EmitSjLjDispatchBlock(MI, BB);
    return BB;

  case ARM::ABS:
  case ARM::t2ABS: {
    // To insert an ABS instruction, we have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // source vreg to test against 0 and the destination vreg to set.
    // It transforms
    //     V1 = ABS V0
    // into
    //     V2 = MOVS V0
    //     BCC                      (branch to SinkBB if V0 >= 0)
    //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
    //     SinkBB: V1 = PHI(V2, V3)
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator BBI = BB;
    ++BBI;
    MachineFunction *Fn = BB->getParent();
    MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    Fn->insert(BBI, RSBBB);
    Fn->insert(BBI, SinkBB);

    unsigned int ABSSrcReg = MI->getOperand(1).getReg();
    unsigned int ABSDstReg = MI->getOperand(0).getReg();
    bool isThumb2 = Subtarget->isThumb2();
    MachineRegisterInfo &MRI = Fn->getRegInfo();
    // In Thumb mode, S must not be specified if the source register is SP or
    // PC, or if the destination register is SP, so restrict the register
    // class.
    unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ?
      (const TargetRegisterClass*)&ARM::rGPRRegClass :
      (const TargetRegisterClass*)&ARM::GPRRegClass);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    SinkBB->splice(SinkBB->begin(), BB,
                   llvm::next(MachineBasicBlock::iterator(MI)),
                   BB->end());
    SinkBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(RSBBB);
    BB->addSuccessor(SinkBB);

    // fall through to SinkMBB
    RSBBB->addSuccessor(SinkBB);

    // insert a cmp at the end of BB
    AddDefaultPred(BuildMI(BB, dl,
                           TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
                   .addReg(ABSSrcReg).addImm(0));

    // insert a bcc with opposite CC to ARMCC::MI at the end of BB
    BuildMI(BB, dl,
      TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // insert rsbri in RSBBB
    // Note: BCC and rsbri will be converted into predicated rsbmi
    // by if-conversion pass
    BuildMI(*RSBBB, RSBBB->begin(), dl,
      TII->get(isThumb2 ?
               ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
      .addReg(ABSSrcReg, RegState::Kill)
      .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);

    // insert PHI in SinkBB,
    // reuse ABSDstReg to not change uses of ABS instruction
    BuildMI(*SinkBB, SinkBB->begin(), dl,
      TII->get(ARM::PHI), ABSDstReg)
      .addReg(NewRsbDstReg).addMBB(RSBBB)
      .addReg(ABSSrcReg).addMBB(BB);

    // remove ABS instruction
    MI->eraseFromParent();

    // return last added BB
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  }
}

void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                      SDNode *Node) const {
  if (!MI->hasPostISelHook()) {
    assert(!convertAddSubFlagsOpcode(MI->getOpcode()) &&
           "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
    return;
  }

  const MCInstrDesc *MCID = &MI->getDesc();
  // Adjust instructions that potentially set the 's' bit (ADC, SBC, RSB,
  // RSC) after isel. Coming out of isel, they have an implicit CPSR def, but
  // the optional operand is still set to noreg. If needed, set the optional
  // operand's register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
  if (NewOpc) {
    const ARMBaseInstrInfo *TII =
      static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo());
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
           "converted opcode should be the same except for cc_out");

    MI->setDesc(*MCID);

    // Add the optional cc_out operand
    MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
  }
  unsigned ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI->RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI->getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
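  // The optional cc_out operand is still the zero register at this point;
  // pointing it at CPSR and marking it as a def is what makes later passes
  // and the printer treat this as the flag-setting (e.g. ADCS) form.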
  MachineOperand &MO = MI->getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
  if (!C)
    return false;
  return AllOnes ? C->isAllOnesValue() : C->isNullValue();
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all-ones constant for a false CC.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    // Fall through.
  case ISD::SIGN_EXTEND: {
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    if (CC.getValueType() != MVT::i1)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
    return true;
  }
  }
}

// Combine a constant select operand into its use:
//
//   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
//   (add (zext cc), x) -> (select cc (add x, 1), x)
//   (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes);
    if (Result.getNode())
      return Result;
  }
  if (N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes);
    if (Result.getNode())
      return Result;
  }
  return SDValue();
}

// AddCombineToVPADDL - For pairwise add on NEON, use the vpaddl instruction
// (only after legalization).
static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {

  // Only perform optimization if after legalize, and if NEON is available. We
  // also expect both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
  // operands, where N is the size of the formed vector.
  // Each EXTRACT_VECTOR should have the same input vector and odd or even
  // index such that we have a pairwise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operand of the ADD which is a BUILD_VECTOR, check to see if
  // each of their operands is an EXTRACT_VECTOR with the same vector and
  // appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // First operand is the vector, verify it's the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // Second is the constant, verify it's correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the constants, we expect all the even indices in N0 and all the
      // odd indices in N1.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment index.
      nextIndex += 2;
    } else
      return SDValue();
  }

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
                                TLI.getPointerTy()));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();
  switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
    case MVT::i8:  widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
                            widenType, &Ops[0], Ops.size());
  return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, tmp);
}

static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}

static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {

  if (Subtarget->isThumb1Only()) return SDValue();

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  // Look for multiply add opportunities.
  // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
  // each add node consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE
  // with a S/UMLAL instruction.
  //
  //          loAdd   UMUL_LOHI
  //            \    / :lo    \ :hi
  //             \  /          \          [no multiline comment]
  //              ADDC         |  hiAdd
  //                 \ :glue  /  /
  //                  \      /  /
  //                    ADDE
  //
  assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
  SDValue AddcOp0 = AddcNode->getOperand(0);
  SDValue AddcOp1 = AddcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcOp0.getNode() == AddcOp1.getNode())
    return SDValue();

  assert(AddcNode->getNumValues() == 2 &&
         AddcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that we have a glued ADDC node.
  if (AddcNode->getValueType(1) != MVT::Glue)
    return SDValue();

  // Check that the ADDC adds the low result of the S/UMUL_LOHI.
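  // Only one ADDC operand has to come from the multiply; the other is the
  // 32-bit value being accumulated. Reject only when neither operand is a
  // MUL_LOHI.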
  if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcOp1->getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  // Look for the glued ADDE.
  SDNode* AddeNode = AddcNode->getGluedUser();
  if (AddeNode == NULL)
    return SDValue();

  // Make sure it is really an ADDE.
  if (AddeNode->getOpcode() != ISD::ADDE)
    return SDValue();

  assert(AddeNode->getNumOperands() == 3 &&
         AddeNode->getOperand(2).getValueType() == MVT::Glue &&
         "ADDE node has the wrong inputs");

  // Check for the triangle shape.
  SDValue AddeOp0 = AddeNode->getOperand(0);
  SDValue AddeOp1 = AddeNode->getOperand(1);

  // Make sure that the ADDE operands are not coming from the same node.
  if (AddeOp0.getNode() == AddeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue* HiMul = &MULOp;
  SDValue* HiAdd = NULL;
  SDValue* LoMul = NULL;
  SDValue* LowAdd = NULL;

  if (IsLeftOperandMUL)
    HiAdd = &AddeOp1;
  else
    HiAdd = &AddeOp0;

  if (AddcOp0->getOpcode() == Opc) {
    LoMul = &AddcOp0;
    LowAdd = &AddcOp1;
  }
  if (AddcOp1->getOpcode() == Opc) {
    LoMul = &AddcOp1;
    LowAdd = &AddcOp0;
  }

  if (LoMul == NULL)
    return SDValue();

  if (LoMul->getNode() != HiMul->getNode())
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));
  Ops.push_back(*LowAdd);
  Ops.push_back(*HiAdd);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32),
                                 &Ops[0], Ops.size());

  // Replace the ADD nodes' uses with the MLAL node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);

  // Return the original node to notify the driver that it should stop
  // replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}

/// PerformADDCCombine - Target-specific dag combine transform from
/// ISD::ADDC, ISD::ADDE, and ISD::UMUL_LOHI/SMUL_LOHI to MLAL.
static SDValue PerformADDCCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  return AddCombineTo64bitMLAL(N, DCI, Subtarget);
}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget) {

  // Attempt to create vpaddl for this add.
  SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
  if (Result.getNode())
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
    if (Result.getNode()) return Result;
  }
  return SDValue();
}

/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // First try with the default operand order.
  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget);
  if (Result.getNode())
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
}

/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
///
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  if (N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
    if (Result.getNode()) return Result;
  }

  return SDValue();
}

/// PerformVMULCombine
/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
/// special multiplier accumulator forwarding.
///   vmul d3, d0, d2
///   vmla d3, d1, d2
/// is faster than
///   vadd d3, d0, d1
///   vmul d3, d3, d2
//  However, for (A + B) * (A + B),
//    vadd d2, d0, d1
//    vmul d3, d0, d2
//    vmla d3, d1, d2
//  is slower than
//    vadd d2, d0, d1
//    vmul d3, d2, d2
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  if (N0 == N1)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}

static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  int64_t MulAmt = C->getSExtValue();
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1),
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1),
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1),
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1),
                                                    MVT::i32)));
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, MVT::i32), Res);
    } else
      return SDValue();
  }

  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
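  // CombineTo with AddTo=false replaces all uses of the MUL without putting
  // the replacement nodes back on the combiner worklist, so the shift/add
  // expansion is not immediately re-combined into a multiply.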
  DCI.CombineTo(N, Res, false);
  return SDValue();
}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {

  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VbicVT;
      SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, VbicVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    SDValue Result = combineSelectAndUseCommutative(N, true, DCI);
    if (Result.getNode())
      return Result;
  }

  return SDValue();
}

/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && Subtarget->hasNEON() &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VorrVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, VorrVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
    SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
    if (Result.getNode())
      return Result;
  }

  // The code below optimizes (or (and X, Y), Z).
  // The AND operand needs to have a single user to make these optimizations
  // profitable.
  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();
  SDValue N1 = N->getOperand(1);

  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
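  // VBSL computes (B & A) | (C & ~A): each result bit is selected from B or
  // C by the corresponding mask bit in A, which is exactly the
  // OR-of-two-ANDs pattern matched below.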
  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0, SplatBits1;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ANDs is a constant.
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) && !HasAnyUndefs) {
        // Ensure that the bit widths of the constants are the same and that
        // the splat arguments are logical inverses, as per the pattern we
        // are trying to simplify.
        if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
            SplatBits0 == ~SplatBits1) {
          // Canonicalize the vector type to make instruction selection
          // simpler.
          EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
          SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
                                       N0->getOperand(1),
                                       N0->getOperand(0),
                                       N1->getOperand(0));
          return DAG.getNode(ISD::BITCAST, dl, VT, Result);
        }
      }
    }
  }

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.

  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //  (i.e., copy a bitfield value into another bitfield of the same width)

  if (VT != MVT::i32)
    return SDValue();

  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, MVT::i32),
                        DAG.getConstant(Mask, MVT::i32));

      // Do not add new nodes to DAG combiner worklist.
      DCI.CombineTo(N, Res, false);
      return SDValue();
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // Case (2): or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or vice versa) must be equivalent for the BFI pattern
    // to match as-is.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasT2ExtractPack() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, MVT::i32));
      // Do not add new nodes to DAG combiner worklist.
      DCI.CombineTo(N, Res, false);
      return SDValue();
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               (~Mask == Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasT2ExtractPack() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = countTrailingZeros(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
                        DAG.getConstant(lsb, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, MVT::i32));
      // Do not add new nodes to DAG combiner worklist.
      DCI.CombineTo(N, Res, false);
      return SDValue();
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, MVT::i32));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, Res, false);
  }

  return SDValue();
}

static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
    SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
    if (Result.getNode())
      return Result;
  }

  return SDValue();
}

/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
/// the bits being cleared by the AND are not demanded by the BFI.
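/// For example, with an inverted BFI mask of 0xffff00ff the insert covers
/// bits [8,16), so only the low 8 bits of B are demanded; an AND of B with
/// any Mask1 that preserves those 8 bits (e.g. 0xff or 0xffff) is redundant
/// and can be dropped.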
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    unsigned Mask = (1 << Width)-1;
    unsigned Mask2 = N11C->getZExtValue();
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  }
  return SDValue();
}

/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR)
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));

  // vmovrrd(load f64) -> (load i32), (load i32)
  SDNode *InNode = InDouble.getNode();
  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
      InNode->getValueType(0) == MVT::f64 &&
      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
      !cast<LoadSDNode>(InNode)->isVolatile()) {
    // TODO: Should this be done for non-FrameIndex operands?
    LoadSDNode *LD = cast<LoadSDNode>(InNode);

    SelectionDAG &DAG = DCI.DAG;
    SDLoc DL(LD);
    SDValue BasePtr = LD->getBasePtr();
    SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr,
                                 LD->getPointerInfo(), LD->isVolatile(),
                                 LD->isNonTemporal(), LD->isInvariant(),
                                 LD->getAlignment());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, MVT::i32));
    SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr,
                                 LD->getPointerInfo(), LD->isVolatile(),
                                 LD->isNonTemporal(), LD->isInvariant(),
                                 std::min(4U, LD->getAlignment() / 2));

    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
    DCI.RemoveFromWorklist(LD);
    DAG.DeleteNode(LD);
    return Result;
  }

  return SDValue();
}

/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BITCAST)
    Op1 = Op1.getOperand(0);
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BITCAST, SDLoc(N),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}

/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
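/// Three transforms are attempted here: a truncating vector store is turned
/// into a shuffle plus a sequence of wider stores; a store of a VMOVDRR is
/// split into two integer stores; and an i64 EXTRACT_VECTOR_ELT store is
/// rewritten as an f64 store so it can use a VFP register.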
8908 static SDValue PerformSTORECombine(SDNode *N, 8909 TargetLowering::DAGCombinerInfo &DCI) { 8910 StoreSDNode *St = cast<StoreSDNode>(N); 8911 if (St->isVolatile()) 8912 return SDValue(); 8913 8914 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 8915 // pack all of the elements in one place. Next, store to memory in fewer 8916 // chunks. 8917 SDValue StVal = St->getValue(); 8918 EVT VT = StVal.getValueType(); 8919 if (St->isTruncatingStore() && VT.isVector()) { 8920 SelectionDAG &DAG = DCI.DAG; 8921 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8922 EVT StVT = St->getMemoryVT(); 8923 unsigned NumElems = VT.getVectorNumElements(); 8924 assert(StVT != VT && "Cannot truncate to the same type"); 8925 unsigned FromEltSz = VT.getVectorElementType().getSizeInBits(); 8926 unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits(); 8927 8928 // From, To sizes and ElemCount must be pow of two 8929 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); 8930 8931 // We are going to use the original vector elt for storing. 8932 // Accumulated smaller vector elements must be a multiple of the store size. 8933 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); 8934 8935 unsigned SizeRatio = FromEltSz / ToEltSz; 8936 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 8937 8938 // Create a type on which we perform the shuffle. 8939 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 8940 NumElems*SizeRatio); 8941 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 8942 8943 SDLoc DL(St); 8944 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 8945 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 8946 for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio; 8947 8948 // Can't shuffle using an illegal type. 8949 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 8950 8951 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, 8952 DAG.getUNDEF(WideVec.getValueType()), 8953 ShuffleVec.data()); 8954 // At this point all of the data is stored at the bottom of the 8955 // register. We now need to save it to mem. 8956 8957 // Find the largest store unit 8958 MVT StoreType = MVT::i8; 8959 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 8960 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 8961 MVT Tp = (MVT::SimpleValueType)tp; 8962 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 8963 StoreType = Tp; 8964 } 8965 // Didn't find a legal store type. 8966 if (!TLI.isTypeLegal(StoreType)) 8967 return SDValue(); 8968 8969 // Bitcast the original vector into a vector of store-size units 8970 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 8971 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 8972 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 8973 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 8974 SmallVector<SDValue, 8> Chains; 8975 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 8976 TLI.getPointerTy()); 8977 SDValue BasePtr = St->getBasePtr(); 8978 8979 // Perform one or more big stores into memory. 
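// (Illustrative numbers, not from the source: for a v8i32-to-v8i8
// truncating store, NumElems * ToEltSz = 64 bits; i64 is not a legal type
// on ARM, so the widest legal store unit found above is i32, giving
// E = 64/32 = 2, and the loop below emits two i32 stores at BasePtr and
// BasePtr + 4.)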
8980 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); 8981 for (unsigned I = 0; I < E; I++) { 8982 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 8983 StoreType, ShuffWide, 8984 DAG.getIntPtrConstant(I)); 8985 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, 8986 St->getPointerInfo(), St->isVolatile(), 8987 St->isNonTemporal(), St->getAlignment()); 8988 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 8989 Increment); 8990 Chains.push_back(Ch); 8991 } 8992 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0], 8993 Chains.size()); 8994 } 8995 8996 if (!ISD::isNormalStore(St)) 8997 return SDValue(); 8998 8999 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 9000 // ARM stores of arguments in the same cache line. 9001 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 9002 StVal.getNode()->hasOneUse()) { 9003 SelectionDAG &DAG = DCI.DAG; 9004 SDLoc DL(St); 9005 SDValue BasePtr = St->getBasePtr(); 9006 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 9007 StVal.getNode()->getOperand(0), BasePtr, 9008 St->getPointerInfo(), St->isVolatile(), 9009 St->isNonTemporal(), St->getAlignment()); 9010 9011 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 9012 DAG.getConstant(4, MVT::i32)); 9013 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 9014 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 9015 St->isNonTemporal(), 9016 std::min(4U, St->getAlignment() / 2)); 9017 } 9018 9019 if (StVal.getValueType() != MVT::i64 || 9020 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 9021 return SDValue(); 9022 9023 // Bitcast an i64 store extracted from a vector to f64. 9024 // Otherwise, the i64 value will be legalized to a pair of i32 values. 9025 SelectionDAG &DAG = DCI.DAG; 9026 SDLoc dl(StVal); 9027 SDValue IntVec = StVal.getOperand(0); 9028 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 9029 IntVec.getValueType().getVectorNumElements()); 9030 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 9031 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 9032 Vec, StVal.getOperand(1)); 9033 dl = SDLoc(N); 9034 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 9035 // Make the DAGCombiner fold the bitcasts. 9036 DCI.AddToWorklist(Vec.getNode()); 9037 DCI.AddToWorklist(ExtElt.getNode()); 9038 DCI.AddToWorklist(V.getNode()); 9039 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 9040 St->getPointerInfo(), St->isVolatile(), 9041 St->isNonTemporal(), St->getAlignment(), 9042 St->getTBAAInfo()); 9043 } 9044 9045 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 9046 /// are normal, non-volatile loads. If so, it is profitable to bitcast an 9047 /// i64 vector to have f64 elements, since the value can then be loaded 9048 /// directly into a VFP register. 9049 static bool hasNormalLoadOperand(SDNode *N) { 9050 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 9051 for (unsigned i = 0; i < NumElts; ++i) { 9052 SDNode *Elt = N->getOperand(i).getNode(); 9053 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 9054 return true; 9055 } 9056 return false; 9057 } 9058 9059 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 9060 /// ISD::BUILD_VECTOR. 
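/// As a sketch of the first case below (operand names invented):
/// (build_vector (ARMISD::VMOVRRD X):0, (ARMISD::VMOVRRD X):1) becomes
/// (bitcast X), and a v2i64 build_vector of loads is rebuilt with f64 loads
/// so type legalization does not split the elements into i32 pairs.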
9061 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
9062 TargetLowering::DAGCombinerInfo &DCI){
9063 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
9064 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
9065 // into a pair of GPRs, which is fine when the value is used as a scalar,
9066 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
9067 SelectionDAG &DAG = DCI.DAG;
9068 if (N->getNumOperands() == 2) {
9069 SDValue RV = PerformVMOVDRRCombine(N, DAG);
9070 if (RV.getNode())
9071 return RV;
9072 }
9073
9074 // Load i64 elements as f64 values so that type legalization does not split
9075 // them up into i32 values.
9076 EVT VT = N->getValueType(0);
9077 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
9078 return SDValue();
9079 SDLoc dl(N);
9080 SmallVector<SDValue, 8> Ops;
9081 unsigned NumElts = VT.getVectorNumElements();
9082 for (unsigned i = 0; i < NumElts; ++i) {
9083 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
9084 Ops.push_back(V);
9085 // Make the DAGCombiner fold the bitcast.
9086 DCI.AddToWorklist(V.getNode());
9087 }
9088 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
9089 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts);
9090 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
9091 }
9092
9093 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
9094 static SDValue
9095 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
9096 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
9097 // At that time, we may have inserted bitcasts from integer to float.
9098 // If these bitcasts have survived DAGCombine, change the lowering of this
9099 // BUILD_VECTOR into something more vector friendly, i.e., something that
9100 // does not force the use of floating point types.
9101
9102 // Make sure we can change the type of the vector.
9103 // This is possible iff:
9104 // 1. The vector is only used in a bitcast to an integer type. I.e.,
9105 // 1.1. Vector is used only once.
9106 // 1.2. Use is a bit convert to an integer type.
9107 // 2. The size of its operands is 32 bits (64-bit operands are not legal).
9108 EVT VT = N->getValueType(0);
9109 EVT EltVT = VT.getVectorElementType();
9110
9111 // Check 1.1. and 2.
9112 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
9113 return SDValue();
9114
9115 // By construction, the input type must be float.
9116 assert(EltVT == MVT::f32 && "Unexpected type!");
9117
9118 // Check 1.2.
9119 SDNode *Use = *N->use_begin();
9120 if (Use->getOpcode() != ISD::BITCAST ||
9121 Use->getValueType(0).isFloatingPoint())
9122 return SDValue();
9123
9124 // Check profitability.
9125 // The model is: if more than half of the relevant operands are bitcast
9126 // from i32, turn the build_vector into a sequence of insert_vector_elt.
9127 // Relevant operands are everything that is not statically
9128 // (i.e., at compile time) bitcast.
9129 unsigned NumOfBitCastedElts = 0;
9130 unsigned NumElts = VT.getVectorNumElements();
9131 unsigned NumOfRelevantElts = NumElts;
9132 for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
9133 SDValue Elt = N->getOperand(Idx);
9134 if (Elt->getOpcode() == ISD::BITCAST) {
9135 // Assume only bit cast to i32 will go away.
9136 if (Elt->getOperand(0).getValueType() == MVT::i32) 9137 ++NumOfBitCastedElts; 9138 } else if (Elt.getOpcode() == ISD::UNDEF || isa<ConstantSDNode>(Elt)) 9139 // Constants are statically casted, thus do not count them as 9140 // relevant operands. 9141 --NumOfRelevantElts; 9142 } 9143 9144 // Check if more than half of the elements require a non-free bitcast. 9145 if (NumOfBitCastedElts <= NumOfRelevantElts / 2) 9146 return SDValue(); 9147 9148 SelectionDAG &DAG = DCI.DAG; 9149 // Create the new vector type. 9150 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 9151 // Check if the type is legal. 9152 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9153 if (!TLI.isTypeLegal(VecVT)) 9154 return SDValue(); 9155 9156 // Combine: 9157 // ARMISD::BUILD_VECTOR E1, E2, ..., EN. 9158 // => BITCAST INSERT_VECTOR_ELT 9159 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1), 9160 // (BITCAST EN), N. 9161 SDValue Vec = DAG.getUNDEF(VecVT); 9162 SDLoc dl(N); 9163 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) { 9164 SDValue V = N->getOperand(Idx); 9165 if (V.getOpcode() == ISD::UNDEF) 9166 continue; 9167 if (V.getOpcode() == ISD::BITCAST && 9168 V->getOperand(0).getValueType() == MVT::i32) 9169 // Fold obvious case. 9170 V = V.getOperand(0); 9171 else { 9172 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V); 9173 // Make the DAGCombiner fold the bitcasts. 9174 DCI.AddToWorklist(V.getNode()); 9175 } 9176 SDValue LaneIdx = DAG.getConstant(Idx, MVT::i32); 9177 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx); 9178 } 9179 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec); 9180 // Make the DAGCombiner fold the bitcasts. 9181 DCI.AddToWorklist(Vec.getNode()); 9182 return Vec; 9183 } 9184 9185 /// PerformInsertEltCombine - Target-specific dag combine xforms for 9186 /// ISD::INSERT_VECTOR_ELT. 9187 static SDValue PerformInsertEltCombine(SDNode *N, 9188 TargetLowering::DAGCombinerInfo &DCI) { 9189 // Bitcast an i64 load inserted into a vector to f64. 9190 // Otherwise, the i64 value will be legalized to a pair of i32 values. 9191 EVT VT = N->getValueType(0); 9192 SDNode *Elt = N->getOperand(1).getNode(); 9193 if (VT.getVectorElementType() != MVT::i64 || 9194 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 9195 return SDValue(); 9196 9197 SelectionDAG &DAG = DCI.DAG; 9198 SDLoc dl(N); 9199 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 9200 VT.getVectorNumElements()); 9201 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 9202 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 9203 // Make the DAGCombiner fold the bitcasts. 9204 DCI.AddToWorklist(Vec.getNode()); 9205 DCI.AddToWorklist(V.getNode()); 9206 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 9207 Vec, V, N->getOperand(2)); 9208 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 9209 } 9210 9211 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 9212 /// ISD::VECTOR_SHUFFLE. 9213 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 9214 // The LLVM shufflevector instruction does not require the shuffle mask 9215 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 9216 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 9217 // operands do not match the mask length, they are extended by concatenating 9218 // them with undef vectors. 
That is probably the right thing for other 9219 // targets, but for NEON it is better to concatenate two double-register 9220 // size vector operands into a single quad-register size vector. Do that 9221 // transformation here: 9222 // shuffle(concat(v1, undef), concat(v2, undef)) -> 9223 // shuffle(concat(v1, v2), undef) 9224 SDValue Op0 = N->getOperand(0); 9225 SDValue Op1 = N->getOperand(1); 9226 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 9227 Op1.getOpcode() != ISD::CONCAT_VECTORS || 9228 Op0.getNumOperands() != 2 || 9229 Op1.getNumOperands() != 2) 9230 return SDValue(); 9231 SDValue Concat0Op1 = Op0.getOperand(1); 9232 SDValue Concat1Op1 = Op1.getOperand(1); 9233 if (Concat0Op1.getOpcode() != ISD::UNDEF || 9234 Concat1Op1.getOpcode() != ISD::UNDEF) 9235 return SDValue(); 9236 // Skip the transformation if any of the types are illegal. 9237 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9238 EVT VT = N->getValueType(0); 9239 if (!TLI.isTypeLegal(VT) || 9240 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 9241 !TLI.isTypeLegal(Concat1Op1.getValueType())) 9242 return SDValue(); 9243 9244 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, 9245 Op0.getOperand(0), Op1.getOperand(0)); 9246 // Translate the shuffle mask. 9247 SmallVector<int, 16> NewMask; 9248 unsigned NumElts = VT.getVectorNumElements(); 9249 unsigned HalfElts = NumElts/2; 9250 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 9251 for (unsigned n = 0; n < NumElts; ++n) { 9252 int MaskElt = SVN->getMaskElt(n); 9253 int NewElt = -1; 9254 if (MaskElt < (int)HalfElts) 9255 NewElt = MaskElt; 9256 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 9257 NewElt = HalfElts + MaskElt - NumElts; 9258 NewMask.push_back(NewElt); 9259 } 9260 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat, 9261 DAG.getUNDEF(VT), NewMask.data()); 9262 } 9263 9264 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 9265 /// NEON load/store intrinsics to merge base address updates. 9266 static SDValue CombineBaseUpdate(SDNode *N, 9267 TargetLowering::DAGCombinerInfo &DCI) { 9268 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 9269 return SDValue(); 9270 9271 SelectionDAG &DAG = DCI.DAG; 9272 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 9273 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 9274 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 9275 SDValue Addr = N->getOperand(AddrOpIdx); 9276 9277 // Search for a use of the address operand that is an increment. 9278 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 9279 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 9280 SDNode *User = *UI; 9281 if (User->getOpcode() != ISD::ADD || 9282 UI.getUse().getResNo() != Addr.getResNo()) 9283 continue; 9284 9285 // Check that the add is independent of the load/store. Otherwise, folding 9286 // it would create a cycle. 9287 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 9288 continue; 9289 9290 // Find the new opcode for the updating load/store. 
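// (The overall rewrite being attempted, as a hedged example: a
// vld1.32 {d16}, [r0] whose address is also consumed by add r0, r0, #8 can
// become the write-back form vld1.32 {d16}, [r0]!, with users of the ADD
// fed from the new node's updated-address result.)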
9291 bool isLoad = true; 9292 bool isLaneOp = false; 9293 unsigned NewOpc = 0; 9294 unsigned NumVecs = 0; 9295 if (isIntrinsic) { 9296 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 9297 switch (IntNo) { 9298 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 9299 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 9300 NumVecs = 1; break; 9301 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 9302 NumVecs = 2; break; 9303 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 9304 NumVecs = 3; break; 9305 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 9306 NumVecs = 4; break; 9307 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 9308 NumVecs = 2; isLaneOp = true; break; 9309 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 9310 NumVecs = 3; isLaneOp = true; break; 9311 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 9312 NumVecs = 4; isLaneOp = true; break; 9313 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 9314 NumVecs = 1; isLoad = false; break; 9315 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 9316 NumVecs = 2; isLoad = false; break; 9317 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 9318 NumVecs = 3; isLoad = false; break; 9319 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 9320 NumVecs = 4; isLoad = false; break; 9321 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 9322 NumVecs = 2; isLoad = false; isLaneOp = true; break; 9323 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 9324 NumVecs = 3; isLoad = false; isLaneOp = true; break; 9325 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 9326 NumVecs = 4; isLoad = false; isLaneOp = true; break; 9327 } 9328 } else { 9329 isLaneOp = true; 9330 switch (N->getOpcode()) { 9331 default: llvm_unreachable("unexpected opcode for Neon base update"); 9332 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 9333 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 9334 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 9335 } 9336 } 9337 9338 // Find the size of memory referenced by the load/store. 9339 EVT VecTy; 9340 if (isLoad) 9341 VecTy = N->getValueType(0); 9342 else 9343 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 9344 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 9345 if (isLaneOp) 9346 NumBytes /= VecTy.getVectorNumElements(); 9347 9348 // If the increment is a constant, it must match the memory ref size. 9349 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 9350 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 9351 uint64_t IncVal = CInc->getZExtValue(); 9352 if (IncVal != NumBytes) 9353 continue; 9354 } else if (NumBytes >= 3 * 16) { 9355 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 9356 // separate instructions that make it harder to use a non-constant update. 9357 continue; 9358 } 9359 9360 // Create the new updating load/store node. 9361 EVT Tys[6]; 9362 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 9363 unsigned n; 9364 for (n = 0; n < NumResultVecs; ++n) 9365 Tys[n] = VecTy; 9366 Tys[n++] = MVT::i32; 9367 Tys[n] = MVT::Other; 9368 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 9369 SmallVector<SDValue, 8> Ops; 9370 Ops.push_back(N->getOperand(0)); // incoming chain 9371 Ops.push_back(N->getOperand(AddrOpIdx)); 9372 Ops.push_back(Inc); 9373 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 9374 Ops.push_back(N->getOperand(i)); 9375 } 9376 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 9377 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, 9378 Ops.data(), Ops.size(), 9379 MemInt->getMemoryVT(), 9380 MemInt->getMemOperand()); 9381 9382 // Update the uses. 9383 std::vector<SDValue> NewResults; 9384 for (unsigned i = 0; i < NumResultVecs; ++i) { 9385 NewResults.push_back(SDValue(UpdN.getNode(), i)); 9386 } 9387 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 9388 DCI.CombineTo(N, NewResults); 9389 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 9390 9391 break; 9392 } 9393 return SDValue(); 9394 } 9395 9396 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 9397 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 9398 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 9399 /// return true. 9400 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 9401 SelectionDAG &DAG = DCI.DAG; 9402 EVT VT = N->getValueType(0); 9403 // vldN-dup instructions only support 64-bit vectors for N > 1. 9404 if (!VT.is64BitVector()) 9405 return false; 9406 9407 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 9408 SDNode *VLD = N->getOperand(0).getNode(); 9409 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 9410 return false; 9411 unsigned NumVecs = 0; 9412 unsigned NewOpc = 0; 9413 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 9414 if (IntNo == Intrinsic::arm_neon_vld2lane) { 9415 NumVecs = 2; 9416 NewOpc = ARMISD::VLD2DUP; 9417 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 9418 NumVecs = 3; 9419 NewOpc = ARMISD::VLD3DUP; 9420 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 9421 NumVecs = 4; 9422 NewOpc = ARMISD::VLD4DUP; 9423 } else { 9424 return false; 9425 } 9426 9427 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 9428 // numbers match the load. 9429 unsigned VLDLaneNo = 9430 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 9431 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 9432 UI != UE; ++UI) { 9433 // Ignore uses of the chain result. 9434 if (UI.getUse().getResNo() == NumVecs) 9435 continue; 9436 SDNode *User = *UI; 9437 if (User->getOpcode() != ARMISD::VDUPLANE || 9438 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 9439 return false; 9440 } 9441 9442 // Create the vldN-dup node. 9443 EVT Tys[5]; 9444 unsigned n; 9445 for (n = 0; n < NumVecs; ++n) 9446 Tys[n] = VT; 9447 Tys[n] = MVT::Other; 9448 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 9449 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 9450 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 9451 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, 9452 Ops, 2, VLDMemInt->getMemoryVT(), 9453 VLDMemInt->getMemOperand()); 9454 9455 // Update the uses. 
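// (Each VDUPLANE user of a vector result is redirected to the matching
// result of the new node; e.g., a vld2lane whose two results each feed a
// VDUPLANE of the loaded lane becomes a single vld2.32 {d16[], d17[]}, [r0].
// The register choices here are illustrative, not from the source.)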
9456 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 9457 UI != UE; ++UI) { 9458 unsigned ResNo = UI.getUse().getResNo(); 9459 // Ignore uses of the chain result. 9460 if (ResNo == NumVecs) 9461 continue; 9462 SDNode *User = *UI; 9463 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 9464 } 9465 9466 // Now the vldN-lane intrinsic is dead except for its chain result. 9467 // Update uses of the chain. 9468 std::vector<SDValue> VLDDupResults; 9469 for (unsigned n = 0; n < NumVecs; ++n) 9470 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 9471 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 9472 DCI.CombineTo(VLD, VLDDupResults); 9473 9474 return true; 9475 } 9476 9477 /// PerformVDUPLANECombine - Target-specific dag combine xforms for 9478 /// ARMISD::VDUPLANE. 9479 static SDValue PerformVDUPLANECombine(SDNode *N, 9480 TargetLowering::DAGCombinerInfo &DCI) { 9481 SDValue Op = N->getOperand(0); 9482 9483 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 9484 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 9485 if (CombineVLDDUP(N, DCI)) 9486 return SDValue(N, 0); 9487 9488 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 9489 // redundant. Ignore bit_converts for now; element sizes are checked below. 9490 while (Op.getOpcode() == ISD::BITCAST) 9491 Op = Op.getOperand(0); 9492 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 9493 return SDValue(); 9494 9495 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 9496 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 9497 // The canonical VMOV for a zero vector uses a 32-bit element size. 9498 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9499 unsigned EltBits; 9500 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 9501 EltSize = 8; 9502 EVT VT = N->getValueType(0); 9503 if (EltSize > VT.getVectorElementType().getSizeInBits()) 9504 return SDValue(); 9505 9506 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); 9507 } 9508 9509 // isConstVecPow2 - Return true if each vector element is a power of 2, all 9510 // elements are the same constant, C, and Log2(C) ranges from 1 to 32. 9511 static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 9512 { 9513 integerPart cN; 9514 integerPart c0 = 0; 9515 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 9516 I != E; I++) { 9517 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 9518 if (!C) 9519 return false; 9520 9521 bool isExact; 9522 APFloat APF = C->getValueAPF(); 9523 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 9524 != APFloat::opOK || !isExact) 9525 return false; 9526 9527 c0 = (I == 0) ? cN : c0; 9528 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 9529 return false; 9530 } 9531 C = c0; 9532 return true; 9533 } 9534 9535 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 9536 /// can replace combinations of VMUL and VCVT (floating-point to integer) 9537 /// when the VMUL has a constant operand that is a power of 2. 
9538 /// 9539 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 9540 /// vmul.f32 d16, d17, d16 9541 /// vcvt.s32.f32 d16, d16 9542 /// becomes: 9543 /// vcvt.s32.f32 d16, d16, #3 9544 static SDValue PerformVCVTCombine(SDNode *N, 9545 TargetLowering::DAGCombinerInfo &DCI, 9546 const ARMSubtarget *Subtarget) { 9547 SelectionDAG &DAG = DCI.DAG; 9548 SDValue Op = N->getOperand(0); 9549 9550 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 9551 Op.getOpcode() != ISD::FMUL) 9552 return SDValue(); 9553 9554 uint64_t C; 9555 SDValue N0 = Op->getOperand(0); 9556 SDValue ConstVec = Op->getOperand(1); 9557 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 9558 9559 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 9560 !isConstVecPow2(ConstVec, isSigned, C)) 9561 return SDValue(); 9562 9563 MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); 9564 MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); 9565 if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) { 9566 // These instructions only exist converting from f32 to i32. We can handle 9567 // smaller integers by generating an extra truncate, but larger ones would 9568 // be lossy. 9569 return SDValue(); 9570 } 9571 9572 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 9573 Intrinsic::arm_neon_vcvtfp2fxu; 9574 unsigned NumLanes = Op.getValueType().getVectorNumElements(); 9575 SDValue FixConv = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), 9576 NumLanes == 2 ? MVT::v2i32 : MVT::v4i32, 9577 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 9578 DAG.getConstant(Log2_64(C), MVT::i32)); 9579 9580 if (IntTy.getSizeInBits() < FloatTy.getSizeInBits()) 9581 FixConv = DAG.getNode(ISD::TRUNCATE, SDLoc(N), N->getValueType(0), FixConv); 9582 9583 return FixConv; 9584 } 9585 9586 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 9587 /// can replace combinations of VCVT (integer to floating-point) and VDIV 9588 /// when the VDIV has a constant operand that is a power of 2. 9589 /// 9590 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 9591 /// vcvt.f32.s32 d16, d16 9592 /// vdiv.f32 d16, d17, d16 9593 /// becomes: 9594 /// vcvt.f32.s32 d16, d16, #3 9595 static SDValue PerformVDIVCombine(SDNode *N, 9596 TargetLowering::DAGCombinerInfo &DCI, 9597 const ARMSubtarget *Subtarget) { 9598 SelectionDAG &DAG = DCI.DAG; 9599 SDValue Op = N->getOperand(0); 9600 unsigned OpOpcode = Op.getNode()->getOpcode(); 9601 9602 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 9603 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 9604 return SDValue(); 9605 9606 uint64_t C; 9607 SDValue ConstVec = N->getOperand(1); 9608 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 9609 9610 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 9611 !isConstVecPow2(ConstVec, isSigned, C)) 9612 return SDValue(); 9613 9614 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); 9615 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); 9616 if (FloatTy.getSizeInBits() != 32 || IntTy.getSizeInBits() > 32) { 9617 // These instructions only exist converting from i32 to f32. We can handle 9618 // smaller integers by generating an extra extend, but larger ones would 9619 // be lossy. 9620 return SDValue(); 9621 } 9622 9623 SDValue ConvInput = Op.getOperand(0); 9624 unsigned NumLanes = Op.getValueType().getVectorNumElements(); 9625 if (IntTy.getSizeInBits() < FloatTy.getSizeInBits()) 9626 ConvInput = DAG.getNode(isSigned ? 
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
9627 SDLoc(N), NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
9628 ConvInput);
9629
9630 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
9631 Intrinsic::arm_neon_vcvtfxu2fp;
9632 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N),
9633 Op.getValueType(),
9634 DAG.getConstant(IntrinsicOpcode, MVT::i32),
9635 ConvInput, DAG.getConstant(Log2_64(C), MVT::i32));
9636 }
9637
9638 /// getVShiftImm - Check if this is a valid build_vector for the immediate
9639 /// operand of a vector shift operation, where all the elements of the
9640 /// build_vector must have the same constant integer value.
9641 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
9642 // Ignore bit_converts.
9643 while (Op.getOpcode() == ISD::BITCAST)
9644 Op = Op.getOperand(0);
9645 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9646 APInt SplatBits, SplatUndef;
9647 unsigned SplatBitSize;
9648 bool HasAnyUndefs;
9649 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
9650 HasAnyUndefs, ElementBits) ||
9651 SplatBitSize > ElementBits)
9652 return false;
9653 Cnt = SplatBits.getSExtValue();
9654 return true;
9655 }
9656
9657 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
9658 /// operand of a vector shift left operation. That value must be in the range:
9659 /// 0 <= Value < ElementBits for a left shift; or
9660 /// 0 <= Value <= ElementBits for a long left shift.
9661 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
9662 assert(VT.isVector() && "vector shift count is not a vector type");
9663 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
9664 if (! getVShiftImm(Op, ElementBits, Cnt))
9665 return false;
9666 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
9667 }
9668
9669 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
9670 /// operand of a vector shift right operation. For a shift opcode, the value
9671 /// is positive, but for an intrinsic the shift count must be negative. The
9672 /// absolute value must be in the range:
9673 /// 1 <= |Value| <= ElementBits for a right shift; or
9674 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
9675 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
9676 int64_t &Cnt) {
9677 assert(VT.isVector() && "vector shift count is not a vector type");
9678 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
9679 if (! getVShiftImm(Op, ElementBits, Cnt))
9680 return false;
9681 if (isIntrinsic)
9682 Cnt = -Cnt;
9683 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
9684 }
9685
9686 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
9687 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
9688 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
9689 switch (IntNo) {
9690 default:
9691 // Don't do anything for most intrinsics.
9692 break;
9693
9694 // Vector shifts: check for immediate versions and lower them.
9695 // Note: This is done during DAG combining instead of DAG legalizing because
9696 // the build_vectors for 64-bit vector element shift counts are generally
9697 // not legal, and it is hard to see their values after they get legalized to
9698 // loads from a constant pool.
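// As an illustrative instance (values assumed): a call to
// @llvm.arm.neon.vshifts.v4i32 whose shift-count operand is the splat
// build_vector <3, 3, 3, 3> is rewritten below to ARMISD::VSHL with the
// immediate 3, which selects to vshl.s32 q0, q0, #3.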
9699 case Intrinsic::arm_neon_vshifts: 9700 case Intrinsic::arm_neon_vshiftu: 9701 case Intrinsic::arm_neon_vshiftls: 9702 case Intrinsic::arm_neon_vshiftlu: 9703 case Intrinsic::arm_neon_vshiftn: 9704 case Intrinsic::arm_neon_vrshifts: 9705 case Intrinsic::arm_neon_vrshiftu: 9706 case Intrinsic::arm_neon_vrshiftn: 9707 case Intrinsic::arm_neon_vqshifts: 9708 case Intrinsic::arm_neon_vqshiftu: 9709 case Intrinsic::arm_neon_vqshiftsu: 9710 case Intrinsic::arm_neon_vqshiftns: 9711 case Intrinsic::arm_neon_vqshiftnu: 9712 case Intrinsic::arm_neon_vqshiftnsu: 9713 case Intrinsic::arm_neon_vqrshiftns: 9714 case Intrinsic::arm_neon_vqrshiftnu: 9715 case Intrinsic::arm_neon_vqrshiftnsu: { 9716 EVT VT = N->getOperand(1).getValueType(); 9717 int64_t Cnt; 9718 unsigned VShiftOpc = 0; 9719 9720 switch (IntNo) { 9721 case Intrinsic::arm_neon_vshifts: 9722 case Intrinsic::arm_neon_vshiftu: 9723 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 9724 VShiftOpc = ARMISD::VSHL; 9725 break; 9726 } 9727 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 9728 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 9729 ARMISD::VSHRs : ARMISD::VSHRu); 9730 break; 9731 } 9732 return SDValue(); 9733 9734 case Intrinsic::arm_neon_vshiftls: 9735 case Intrinsic::arm_neon_vshiftlu: 9736 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 9737 break; 9738 llvm_unreachable("invalid shift count for vshll intrinsic"); 9739 9740 case Intrinsic::arm_neon_vrshifts: 9741 case Intrinsic::arm_neon_vrshiftu: 9742 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 9743 break; 9744 return SDValue(); 9745 9746 case Intrinsic::arm_neon_vqshifts: 9747 case Intrinsic::arm_neon_vqshiftu: 9748 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 9749 break; 9750 return SDValue(); 9751 9752 case Intrinsic::arm_neon_vqshiftsu: 9753 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 9754 break; 9755 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 9756 9757 case Intrinsic::arm_neon_vshiftn: 9758 case Intrinsic::arm_neon_vrshiftn: 9759 case Intrinsic::arm_neon_vqshiftns: 9760 case Intrinsic::arm_neon_vqshiftnu: 9761 case Intrinsic::arm_neon_vqshiftnsu: 9762 case Intrinsic::arm_neon_vqrshiftns: 9763 case Intrinsic::arm_neon_vqrshiftnu: 9764 case Intrinsic::arm_neon_vqrshiftnsu: 9765 // Narrowing shifts require an immediate right shift. 9766 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 9767 break; 9768 llvm_unreachable("invalid shift count for narrowing vector shift " 9769 "intrinsic"); 9770 9771 default: 9772 llvm_unreachable("unhandled vector shift"); 9773 } 9774 9775 switch (IntNo) { 9776 case Intrinsic::arm_neon_vshifts: 9777 case Intrinsic::arm_neon_vshiftu: 9778 // Opcode already set above. 9779 break; 9780 case Intrinsic::arm_neon_vshiftls: 9781 case Intrinsic::arm_neon_vshiftlu: 9782 if (Cnt == VT.getVectorElementType().getSizeInBits()) 9783 VShiftOpc = ARMISD::VSHLLi; 9784 else 9785 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
9786 ARMISD::VSHLLs : ARMISD::VSHLLu); 9787 break; 9788 case Intrinsic::arm_neon_vshiftn: 9789 VShiftOpc = ARMISD::VSHRN; break; 9790 case Intrinsic::arm_neon_vrshifts: 9791 VShiftOpc = ARMISD::VRSHRs; break; 9792 case Intrinsic::arm_neon_vrshiftu: 9793 VShiftOpc = ARMISD::VRSHRu; break; 9794 case Intrinsic::arm_neon_vrshiftn: 9795 VShiftOpc = ARMISD::VRSHRN; break; 9796 case Intrinsic::arm_neon_vqshifts: 9797 VShiftOpc = ARMISD::VQSHLs; break; 9798 case Intrinsic::arm_neon_vqshiftu: 9799 VShiftOpc = ARMISD::VQSHLu; break; 9800 case Intrinsic::arm_neon_vqshiftsu: 9801 VShiftOpc = ARMISD::VQSHLsu; break; 9802 case Intrinsic::arm_neon_vqshiftns: 9803 VShiftOpc = ARMISD::VQSHRNs; break; 9804 case Intrinsic::arm_neon_vqshiftnu: 9805 VShiftOpc = ARMISD::VQSHRNu; break; 9806 case Intrinsic::arm_neon_vqshiftnsu: 9807 VShiftOpc = ARMISD::VQSHRNsu; break; 9808 case Intrinsic::arm_neon_vqrshiftns: 9809 VShiftOpc = ARMISD::VQRSHRNs; break; 9810 case Intrinsic::arm_neon_vqrshiftnu: 9811 VShiftOpc = ARMISD::VQRSHRNu; break; 9812 case Intrinsic::arm_neon_vqrshiftnsu: 9813 VShiftOpc = ARMISD::VQRSHRNsu; break; 9814 } 9815 9816 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), 9817 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 9818 } 9819 9820 case Intrinsic::arm_neon_vshiftins: { 9821 EVT VT = N->getOperand(1).getValueType(); 9822 int64_t Cnt; 9823 unsigned VShiftOpc = 0; 9824 9825 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 9826 VShiftOpc = ARMISD::VSLI; 9827 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 9828 VShiftOpc = ARMISD::VSRI; 9829 else { 9830 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 9831 } 9832 9833 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0), 9834 N->getOperand(1), N->getOperand(2), 9835 DAG.getConstant(Cnt, MVT::i32)); 9836 } 9837 9838 case Intrinsic::arm_neon_vqrshifts: 9839 case Intrinsic::arm_neon_vqrshiftu: 9840 // No immediate versions of these to check for. 9841 break; 9842 } 9843 9844 return SDValue(); 9845 } 9846 9847 /// PerformShiftCombine - Checks for immediate versions of vector shifts and 9848 /// lowers them. As with the vector shift intrinsics, this is done during DAG 9849 /// combining instead of DAG legalizing because the build_vectors for 64-bit 9850 /// vector element shift counts are generally not legal, and it is hard to see 9851 /// their values after they get legalized to loads from a constant pool. 9852 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 9853 const ARMSubtarget *ST) { 9854 EVT VT = N->getValueType(0); 9855 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 9856 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 9857 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. 9858 SDValue N1 = N->getOperand(1); 9859 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 9860 SDValue N0 = N->getOperand(0); 9861 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 9862 DAG.MaskedValueIsZero(N0.getOperand(0), 9863 APInt::getHighBitsSet(32, 16))) 9864 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1); 9865 } 9866 } 9867 9868 // Nothing to be done for scalar shifts. 
9869 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9870 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 9871 return SDValue(); 9872 9873 assert(ST->hasNEON() && "unexpected vector shift"); 9874 int64_t Cnt; 9875 9876 switch (N->getOpcode()) { 9877 default: llvm_unreachable("unexpected shift opcode"); 9878 9879 case ISD::SHL: 9880 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 9881 return DAG.getNode(ARMISD::VSHL, SDLoc(N), VT, N->getOperand(0), 9882 DAG.getConstant(Cnt, MVT::i32)); 9883 break; 9884 9885 case ISD::SRA: 9886 case ISD::SRL: 9887 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 9888 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 9889 ARMISD::VSHRs : ARMISD::VSHRu); 9890 return DAG.getNode(VShiftOpc, SDLoc(N), VT, N->getOperand(0), 9891 DAG.getConstant(Cnt, MVT::i32)); 9892 } 9893 } 9894 return SDValue(); 9895 } 9896 9897 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 9898 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 9899 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 9900 const ARMSubtarget *ST) { 9901 SDValue N0 = N->getOperand(0); 9902 9903 // Check for sign- and zero-extensions of vector extract operations of 8- 9904 // and 16-bit vector elements. NEON supports these directly. They are 9905 // handled during DAG combining because type legalization will promote them 9906 // to 32-bit types and it is messy to recognize the operations after that. 9907 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 9908 SDValue Vec = N0.getOperand(0); 9909 SDValue Lane = N0.getOperand(1); 9910 EVT VT = N->getValueType(0); 9911 EVT EltVT = N0.getValueType(); 9912 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9913 9914 if (VT == MVT::i32 && 9915 (EltVT == MVT::i8 || EltVT == MVT::i16) && 9916 TLI.isTypeLegal(Vec.getValueType()) && 9917 isa<ConstantSDNode>(Lane)) { 9918 9919 unsigned Opc = 0; 9920 switch (N->getOpcode()) { 9921 default: llvm_unreachable("unexpected opcode"); 9922 case ISD::SIGN_EXTEND: 9923 Opc = ARMISD::VGETLANEs; 9924 break; 9925 case ISD::ZERO_EXTEND: 9926 case ISD::ANY_EXTEND: 9927 Opc = ARMISD::VGETLANEu; 9928 break; 9929 } 9930 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane); 9931 } 9932 } 9933 9934 return SDValue(); 9935 } 9936 9937 /// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 9938 /// to match f32 max/min patterns to use NEON vmax/vmin instructions. 9939 static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 9940 const ARMSubtarget *ST) { 9941 // If the target supports NEON, try to use vmax/vmin instructions for f32 9942 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 9943 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 9944 // a NaN; only do the transformation when it matches that behavior. 9945 9946 // For now only do this when using NEON for FP operations; if using VFP, it 9947 // is not obvious that the benefit outweighs the cost of switching to the 9948 // NEON pipeline. 
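// (Sketch of the intended match: "x < y ? x : y" reaches here as
// select_cc(x, y, x, y, setolt) and, once the NaN and signed-zero checks
// below pass, becomes ARMISD::FMIN, i.e. a single vmin.f32.)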
9949 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 9950 N->getValueType(0) != MVT::f32) 9951 return SDValue(); 9952 9953 SDValue CondLHS = N->getOperand(0); 9954 SDValue CondRHS = N->getOperand(1); 9955 SDValue LHS = N->getOperand(2); 9956 SDValue RHS = N->getOperand(3); 9957 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 9958 9959 unsigned Opcode = 0; 9960 bool IsReversed; 9961 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 9962 IsReversed = false; // x CC y ? x : y 9963 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 9964 IsReversed = true ; // x CC y ? y : x 9965 } else { 9966 return SDValue(); 9967 } 9968 9969 bool IsUnordered; 9970 switch (CC) { 9971 default: break; 9972 case ISD::SETOLT: 9973 case ISD::SETOLE: 9974 case ISD::SETLT: 9975 case ISD::SETLE: 9976 case ISD::SETULT: 9977 case ISD::SETULE: 9978 // If LHS is NaN, an ordered comparison will be false and the result will 9979 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 9980 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 9981 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 9982 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 9983 break; 9984 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 9985 // will return -0, so vmin can only be used for unsafe math or if one of 9986 // the operands is known to be nonzero. 9987 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 9988 !DAG.getTarget().Options.UnsafeFPMath && 9989 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 9990 break; 9991 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 9992 break; 9993 9994 case ISD::SETOGT: 9995 case ISD::SETOGE: 9996 case ISD::SETGT: 9997 case ISD::SETGE: 9998 case ISD::SETUGT: 9999 case ISD::SETUGE: 10000 // If LHS is NaN, an ordered comparison will be false and the result will 10001 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 10002 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 10003 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 10004 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 10005 break; 10006 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 10007 // will return +0, so vmax can only be used for unsafe math or if one of 10008 // the operands is known to be nonzero. 10009 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 10010 !DAG.getTarget().Options.UnsafeFPMath && 10011 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 10012 break; 10013 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 10014 break; 10015 } 10016 10017 if (!Opcode) 10018 return SDValue(); 10019 return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), LHS, RHS); 10020 } 10021 10022 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 10023 SDValue 10024 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 10025 SDValue Cmp = N->getOperand(4); 10026 if (Cmp.getOpcode() != ARMISD::CMPZ) 10027 // Only looking at EQ and NE cases. 
10028 return SDValue(); 10029 10030 EVT VT = N->getValueType(0); 10031 SDLoc dl(N); 10032 SDValue LHS = Cmp.getOperand(0); 10033 SDValue RHS = Cmp.getOperand(1); 10034 SDValue FalseVal = N->getOperand(0); 10035 SDValue TrueVal = N->getOperand(1); 10036 SDValue ARMcc = N->getOperand(2); 10037 ARMCC::CondCodes CC = 10038 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 10039 10040 // Simplify 10041 // mov r1, r0 10042 // cmp r1, x 10043 // mov r0, y 10044 // moveq r0, x 10045 // to 10046 // cmp r0, x 10047 // movne r0, y 10048 // 10049 // mov r1, r0 10050 // cmp r1, x 10051 // mov r0, x 10052 // movne r0, y 10053 // to 10054 // cmp r0, x 10055 // movne r0, y 10056 /// FIXME: Turn this into a target neutral optimization? 10057 SDValue Res; 10058 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 10059 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 10060 N->getOperand(3), Cmp); 10061 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 10062 SDValue ARMcc; 10063 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 10064 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 10065 N->getOperand(3), NewCmp); 10066 } 10067 10068 if (Res.getNode()) { 10069 APInt KnownZero, KnownOne; 10070 DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne); 10071 // Capture demanded bits information that would be otherwise lost. 10072 if (KnownZero == 0xfffffffe) 10073 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 10074 DAG.getValueType(MVT::i1)); 10075 else if (KnownZero == 0xffffff00) 10076 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 10077 DAG.getValueType(MVT::i8)); 10078 else if (KnownZero == 0xffff0000) 10079 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 10080 DAG.getValueType(MVT::i16)); 10081 } 10082 10083 return Res; 10084 } 10085 10086 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 10087 DAGCombinerInfo &DCI) const { 10088 switch (N->getOpcode()) { 10089 default: break; 10090 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); 10091 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 10092 case ISD::SUB: return PerformSUBCombine(N, DCI); 10093 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 10094 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 10095 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 10096 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 10097 case ARMISD::BFI: return PerformBFICombine(N, DCI); 10098 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 10099 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 10100 case ISD::STORE: return PerformSTORECombine(N, DCI); 10101 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 10102 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 10103 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 10104 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 10105 case ISD::FP_TO_SINT: 10106 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 10107 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 10108 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 10109 case ISD::SHL: 10110 case ISD::SRA: 10111 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 10112 case ISD::SIGN_EXTEND: 10113 case ISD::ZERO_EXTEND: 10114 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 10115 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, 
Subtarget);
10116 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
10117 case ARMISD::VLD2DUP:
10118 case ARMISD::VLD3DUP:
10119 case ARMISD::VLD4DUP:
10120 return CombineBaseUpdate(N, DCI);
10121 case ARMISD::BUILD_VECTOR:
10122 return PerformARMBUILD_VECTORCombine(N, DCI);
10123 case ISD::INTRINSIC_VOID:
10124 case ISD::INTRINSIC_W_CHAIN:
10125 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
10126 case Intrinsic::arm_neon_vld1:
10127 case Intrinsic::arm_neon_vld2:
10128 case Intrinsic::arm_neon_vld3:
10129 case Intrinsic::arm_neon_vld4:
10130 case Intrinsic::arm_neon_vld2lane:
10131 case Intrinsic::arm_neon_vld3lane:
10132 case Intrinsic::arm_neon_vld4lane:
10133 case Intrinsic::arm_neon_vst1:
10134 case Intrinsic::arm_neon_vst2:
10135 case Intrinsic::arm_neon_vst3:
10136 case Intrinsic::arm_neon_vst4:
10137 case Intrinsic::arm_neon_vst2lane:
10138 case Intrinsic::arm_neon_vst3lane:
10139 case Intrinsic::arm_neon_vst4lane:
10140 return CombineBaseUpdate(N, DCI);
10141 default: break;
10142 }
10143 break;
10144 }
10145 return SDValue();
10146 }
10147
10148 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
10149 EVT VT) const {
10150 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
10151 }
10152
10153 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
10154 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
10155 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
10156
10157 switch (VT.getSimpleVT().SimpleTy) {
10158 default:
10159 return false;
10160 case MVT::i8:
10161 case MVT::i16:
10162 case MVT::i32: {
10163 // Unaligned access can use (for example) LDRB, LDRH, LDR
10164 if (AllowsUnaligned) {
10165 if (Fast)
10166 *Fast = Subtarget->hasV7Ops();
10167 return true;
10168 }
10169 return false;
10170 }
10171 case MVT::f64:
10172 case MVT::v2f64: {
10173 // For any little-endian targets with NEON, we can support unaligned ld/st
10174 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
10175 // A big-endian target may also explicitly support unaligned accesses.
10176 if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
10177 if (Fast)
10178 *Fast = true;
10179 return true;
10180 }
10181 return false;
10182 }
10183 }
10184 }
10185
10186 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
10187 unsigned AlignCheck) {
10188 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
10189 (DstAlign == 0 || DstAlign % AlignCheck == 0));
10190 }
10191
10192 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
10193 unsigned DstAlign, unsigned SrcAlign,
10194 bool IsMemset, bool ZeroMemset,
10195 bool MemcpyStrSrc,
10196 MachineFunction &MF) const {
10197 const Function *F = MF.getFunction();
10198
10199 // See if we can use NEON instructions for this...
10200 if ((!IsMemset || ZeroMemset) &&
10201 Subtarget->hasNEON() &&
10202 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
10203 Attribute::NoImplicitFloat)) {
10204 bool Fast;
10205 if (Size >= 16 &&
10206 (memOpAlign(SrcAlign, DstAlign, 16) ||
10207 (allowsUnalignedMemoryAccesses(MVT::v2f64, &Fast) && Fast))) {
10208 return MVT::v2f64;
10209 } else if (Size >= 8 &&
10210 (memOpAlign(SrcAlign, DstAlign, 8) ||
10211 (allowsUnalignedMemoryAccesses(MVT::f64, &Fast) && Fast))) {
10212 return MVT::f64;
10213 }
10214 }
10215
10216 // Lower to i32/i16 if the size permits.
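// (For example, with illustrative sizes: an 8-byte copy that fails the
// NEON checks above returns MVT::i32 here, so the generic expansion uses
// word-sized loads and stores rather than byte-at-a-time copies.)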
10217 if (Size >= 4) 10218 return MVT::i32; 10219 else if (Size >= 2) 10220 return MVT::i16; 10221 10222 // Let the target-independent logic figure it out. 10223 return MVT::Other; 10224 } 10225 10226 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 10227 if (Val.getOpcode() != ISD::LOAD) 10228 return false; 10229 10230 EVT VT1 = Val.getValueType(); 10231 if (!VT1.isSimple() || !VT1.isInteger() || 10232 !VT2.isSimple() || !VT2.isInteger()) 10233 return false; 10234 10235 switch (VT1.getSimpleVT().SimpleTy) { 10236 default: break; 10237 case MVT::i1: 10238 case MVT::i8: 10239 case MVT::i16: 10240 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. 10241 return true; 10242 } 10243 10244 return false; 10245 } 10246 10247 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { 10248 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 10249 return false; 10250 10251 if (!isTypeLegal(EVT::getEVT(Ty1))) 10252 return false; 10253 10254 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); 10255 10256 // Assuming the caller doesn't have a zeroext or signext return parameter, 10257 // truncation all the way down to i1 is valid. 10258 return true; 10259 } 10260 10261 10262 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 10263 if (V < 0) 10264 return false; 10265 10266 unsigned Scale = 1; 10267 switch (VT.getSimpleVT().SimpleTy) { 10268 default: return false; 10269 case MVT::i1: 10270 case MVT::i8: 10271 // Scale == 1; 10272 break; 10273 case MVT::i16: 10274 // Scale == 2; 10275 Scale = 2; 10276 break; 10277 case MVT::i32: 10278 // Scale == 4; 10279 Scale = 4; 10280 break; 10281 } 10282 10283 if ((V & (Scale - 1)) != 0) 10284 return false; 10285 V /= Scale; 10286 return V == (V & ((1LL << 5) - 1)); 10287 } 10288 10289 static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 10290 const ARMSubtarget *Subtarget) { 10291 bool isNeg = false; 10292 if (V < 0) { 10293 isNeg = true; 10294 V = - V; 10295 } 10296 10297 switch (VT.getSimpleVT().SimpleTy) { 10298 default: return false; 10299 case MVT::i1: 10300 case MVT::i8: 10301 case MVT::i16: 10302 case MVT::i32: 10303 // + imm12 or - imm8 10304 if (isNeg) 10305 return V == (V & ((1LL << 8) - 1)); 10306 return V == (V & ((1LL << 12) - 1)); 10307 case MVT::f32: 10308 case MVT::f64: 10309 // Same as ARM mode. FIXME: NEON? 10310 if (!Subtarget->hasVFP2()) 10311 return false; 10312 if ((V & 3) != 0) 10313 return false; 10314 V >>= 2; 10315 return V == (V & ((1LL << 8) - 1)); 10316 } 10317 } 10318 10319 /// isLegalAddressImmediate - Return true if the integer value can be used 10320 /// as the offset of the target addressing mode for load / store of the 10321 /// given type. 10322 static bool isLegalAddressImmediate(int64_t V, EVT VT, 10323 const ARMSubtarget *Subtarget) { 10324 if (V == 0) 10325 return true; 10326 10327 if (!VT.isSimple()) 10328 return false; 10329 10330 if (Subtarget->isThumb1Only()) 10331 return isLegalT1AddressImmediate(V, VT); 10332 else if (Subtarget->isThumb2()) 10333 return isLegalT2AddressImmediate(V, VT, Subtarget); 10334 10335 // ARM mode. 10336 if (V < 0) 10337 V = - V; 10338 switch (VT.getSimpleVT().SimpleTy) { 10339 default: return false; 10340 case MVT::i1: 10341 case MVT::i8: 10342 case MVT::i32: 10343 // +- imm12 10344 return V == (V & ((1LL << 12) - 1)); 10345 case MVT::i16: 10346 // +- imm8 10347 return V == (V & ((1LL << 8) - 1)); 10348 case MVT::f32: 10349 case MVT::f64: 10350 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
10351 return false; 10352 if ((V & 3) != 0) 10353 return false; 10354 V >>= 2; 10355 return V == (V & ((1LL << 8) - 1)); 10356 } 10357 } 10358 10359 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 10360 EVT VT) const { 10361 int Scale = AM.Scale; 10362 if (Scale < 0) 10363 return false; 10364 10365 switch (VT.getSimpleVT().SimpleTy) { 10366 default: return false; 10367 case MVT::i1: 10368 case MVT::i8: 10369 case MVT::i16: 10370 case MVT::i32: 10371 if (Scale == 1) 10372 return true; 10373 // r + r << imm 10374 Scale = Scale & ~1; 10375 return Scale == 2 || Scale == 4 || Scale == 8; 10376 case MVT::i64: 10377 // r + r 10378 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 10379 return true; 10380 return false; 10381 case MVT::isVoid: 10382 // Note, we allow "void" uses (basically, uses that aren't loads or 10383 // stores), because arm allows folding a scale into many arithmetic 10384 // operations. This should be made more precise and revisited later. 10385 10386 // Allow r << imm, but the imm has to be a multiple of two. 10387 if (Scale & 1) return false; 10388 return isPowerOf2_32(Scale); 10389 } 10390 } 10391 10392 /// isLegalAddressingMode - Return true if the addressing mode represented 10393 /// by AM is legal for this target, for a load/store of the specified type. 10394 bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 10395 Type *Ty) const { 10396 EVT VT = getValueType(Ty, true); 10397 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 10398 return false; 10399 10400 // Can never fold addr of global into load/store. 10401 if (AM.BaseGV) 10402 return false; 10403 10404 switch (AM.Scale) { 10405 case 0: // no scale reg, must be "r+i" or "r", or "i". 10406 break; 10407 case 1: 10408 if (Subtarget->isThumb1Only()) 10409 return false; 10410 // FALL THROUGH. 10411 default: 10412 // ARM doesn't support any R+R*scale+imm addr modes. 10413 if (AM.BaseOffs) 10414 return false; 10415 10416 if (!VT.isSimple()) 10417 return false; 10418 10419 if (Subtarget->isThumb2()) 10420 return isLegalT2ScaledAddressingMode(AM, VT); 10421 10422 int Scale = AM.Scale; 10423 switch (VT.getSimpleVT().SimpleTy) { 10424 default: return false; 10425 case MVT::i1: 10426 case MVT::i8: 10427 case MVT::i32: 10428 if (Scale < 0) Scale = -Scale; 10429 if (Scale == 1) 10430 return true; 10431 // r + r << imm 10432 return isPowerOf2_32(Scale & ~1); 10433 case MVT::i16: 10434 case MVT::i64: 10435 // r + r 10436 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 10437 return true; 10438 return false; 10439 10440 case MVT::isVoid: 10441 // Note, we allow "void" uses (basically, uses that aren't loads or 10442 // stores), because arm allows folding a scale into many arithmetic 10443 // operations. This should be made more precise and revisited later. 10444 10445 // Allow r << imm, but the imm has to be a multiple of two. 10446 if (Scale & 1) return false; 10447 return isPowerOf2_32(Scale); 10448 } 10449 } 10450 return true; 10451 } 10452 10453 /// isLegalICmpImmediate - Return true if the specified immediate is legal 10454 /// icmp immediate, that is the target has icmp instructions which can compare 10455 /// a register against the immediate without having to materialize the 10456 /// immediate into a register. 10457 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 10458 // Thumb2 and ARM modes can use cmn for negative immediates. 
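// (e.g., "cmp r0, #-42" is not encodable, but the same test can be done
// with "cmn r0, #42", which is why only |Imm| is checked below.)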

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // Thumb2 and ARM modes can use cmn for negative immediates.
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(llvm::abs64(Imm)) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(llvm::abs64(Imm)) != -1;
  // Thumb1 doesn't have cmn, and only 8-bit immediates.
  return Imm >= 0 && Imm <= 255;
}

/// isLegalAddImmediate - Return true if the specified immediate is a legal
/// add *or sub* immediate, that is, the target has add or sub instructions
/// which can add a register with the immediate without having to materialize
/// the immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Same encoding for add/sub, just flip the sign.
  int64_t AbsImm = llvm::abs64(Imm);
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(AbsImm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has 8-bit unsigned immediates.
  return AbsImm >= 0 && AbsImm <= 255;
}

static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal =
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}
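
// Illustrative example (informal): given a load whose address is
// "add r1, #4", the parts extracted above (isInc = true, Offset = 4) let
// the DAG combiner form an indexed access such as "ldr r0, [r1, #4]!"; a
// negative constant like -8 is instead returned as the subtracting form
// with isInc = false and Offset = 8, matching the sub-encoded address modes.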

static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bits, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
      return true;
    }
  }

  return false;
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as a pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT  = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                       Offset, isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                        Offset, isInc, DAG);
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT  = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT  = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (Subtarget->isThumb2())
    isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                       isInc, DAG);
  else
    isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}
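
// Illustrative example (informal): a load from r1 followed by a separate
// "add r1, r1, #4" that updates the same pointer can be folded by the logic
// above into the post-indexed form "ldr r0, [r1], #4", performing the load
// and the base-register update in one instruction.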

void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned BitWidth = KnownOne.getBitWidth();
  KnownZero = KnownOne = APInt(BitWidth, 0);
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    APInt KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne  &= KnownOneRHS;
    return;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    case 'h': return C_RegisterClass;
    case 'x': return C_RegisterClass;
    case 't': return C_RegisterClass;
    case 'j': return C_Other; // Constant for movw.
      // An address with a single base register. Due to the way we
      // currently handle addresses it is the same as an 'r' memory constraint.
    case 'Q': return C_Memory;
    }
  } else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    default: break;
    // All 'U+' constraints are addresses.
    case 'U': return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
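
// Illustrative use (informal): the "rev" pattern recognized by
// ExpandInlineAsm above corresponds to C source such as
//   uint32_t r; __asm__("rev %0, %1" : "=l"(r) : "l"(x));
// which, on V6+, is rewritten into the generic llvm.bswap intrinsic so
// later passes can reason about it like ordinary code.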

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}

typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
RCPair
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
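
// Illustrative mapping (informal): with the table above, a float operand
// under the 't' constraint is allocated to an SPR, so inline asm such as
//   float s; __asm__("vadd.f32 %0, %1, %2" : "=t"(s) : "t"(a), "t"(b));
// operates on s0-s31, while 'w' with a 64-bit value selects the DPR class
// instead.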

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'j':
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (ConstraintLetter) {
    case 'j':
      // Constant suitable for movw, must be between 0 and
      // 65535.
      if (Subtarget->hasV6T2Ops())
        if (CVal >= 0 && CVal <= 65535)
          break;
      return;
    case 'I':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between 0 and 255, for ADD
        // immediates.
        if (CVal >= 0 && CVal <= 255)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getT2SOImmVal(CVal) != -1)
          break;
      } else {
        // A constant that can be used as an immediate value in a
        // data-processing instruction.
        if (ARM_AM::getSOImmVal(CVal) != -1)
          break;
      }
      return;

    case 'J':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between -255 and -1, for negated ADD
        // immediates. This can be used in GCC with an "n" modifier that
        // prints the negated value, for use with SUB instructions. It is
        // not useful otherwise but is implemented for compatibility.
        if (CVal >= -255 && CVal <= -1)
          break;
      } else {
        // This must be a constant between -4095 and 4095. It is not clear
        // what this constraint is intended for. Implemented for
        // compatibility with GCC.
        if (CVal >= -4095 && CVal <= 4095)
          break;
      }
      return;

    case 'K':
      if (Subtarget->isThumb1Only()) {
        // A 32-bit value where only one byte has a nonzero value. Exclude
        // zero to match GCC. This constraint is used by GCC internally for
        // constants that can be loaded with a move/shift combination.
        // It is not useful otherwise but is implemented for compatibility.
        if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction. This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions. It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getT2SOImmVal(~CVal) != -1)
          break;
      } else {
        // A constant whose bitwise inverse can be used as an immediate
        // value in a data-processing instruction. This can be used in GCC
        // with a "B" modifier that prints the inverted value, for use with
        // BIC and MVN instructions. It is not useful otherwise but is
        // implemented for compatibility.
        if (ARM_AM::getSOImmVal(~CVal) != -1)
          break;
      }
      return;
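
    // Worked example for 'K' (informal): CVal = 0xFFFFFF00 has the inverse
    // ~CVal = 0xFF, a valid so_imm, so the constraint is satisfied and the
    // constant can be applied with "bic r0, r1, #255" rather than being
    // materialized in a register first.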

    case 'L':
      if (Subtarget->isThumb1Only()) {
        // This must be a constant between -7 and 7,
        // for 3-operand ADD/SUB immediate instructions.
        if (CVal >= -7 && CVal < 7)
          break;
      } else if (Subtarget->isThumb2()) {
        // A constant whose negation can be used as an immediate value in a
        // data-processing instruction. This can be used in GCC with an "n"
        // modifier that prints the negated value, for use with SUB
        // instructions. It is not useful otherwise but is implemented for
        // compatibility.
        if (ARM_AM::getT2SOImmVal(-CVal) != -1)
          break;
      } else {
        // A constant whose negation can be used as an immediate value in a
        // data-processing instruction. This can be used in GCC with an "n"
        // modifier that prints the negated value, for use with SUB
        // instructions. It is not useful otherwise but is implemented for
        // compatibility.
        if (ARM_AM::getSOImmVal(-CVal) != -1)
          break;
      }
      return;

    case 'M':
      if (Subtarget->isThumb()) { // FIXME thumb2
        // This must be a multiple of 4 between 0 and 1020, for
        // ADD sp + immediate.
        if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
          break;
      } else {
        // A power of two or a constant between 0 and 32.  This is used in
        // GCC for the shift amount on shifted register operands, but it is
        // useful in general for any shift amounts.
        if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
          break;
      }
      return;

    case 'N':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a constant between 0 and 31, for shift amounts.
        if (CVal >= 0 && CVal <= 31)
          break;
      }
      return;

    case 'O':
      if (Subtarget->isThumb()) {  // FIXME thumb2
        // This must be a multiple of 4 between -508 and 508, for
        // ADD/SUB sp = sp + immediate.
        if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
          break;
      }
      return;
    }
    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
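
// Note on the lowering below (informal): for an i32 SDIVREM on an AEABI
// target the libcall typically resolves to __aeabi_idivmod, which returns
// the quotient in r0 and the remainder in r1, so a single call covers both
// results of the DAG node.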

SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetAEABI() && "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  }

  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op->getOperand(i);
    Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy());

  Type *RetTy = (Type*)StructType::get(Ty, Ty, NULL);

  SDLoc dl(Op);
  TargetLowering::
  CallLoweringInfo CLI(InChain, RetTy, isSigned, !isSigned, false, true,
                       0, getLibcallCallingConv(LC), /*isTailCall=*/false,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
                       Callee, Args, DAG, dl);
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  return CallInfo.first;
}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;

  // There can be 1's on either or both "outsides"; all the "inside"
  // bits must be 0's.
  unsigned TO = CountTrailingOnes_32(v);
  unsigned LO = CountLeadingOnes_32(v);
  v = (v >> TO) << TO;
  v = (v << LO) >> LO;
  return v == 0;
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  if (!Subtarget->hasVFP3())
    return false;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64)
    return ARM_AM::getFP64Imm(Imm) != -1;
  return false;
}
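
// Illustrative sizing for getTgtMemIntrinsic below (informal): a
// vld3.v4i32 produces three <4 x i32> values, i.e. 48 bytes, so NumElts is
// 48 / 8 = 6 and memVT is recorded conservatively as v6i64, covering the
// whole region the intrinsic may touch.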

/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_ldrex: {
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
    Info.vol = true;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_strex: {
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
    Info.vol = true;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_strexd: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = 8;
    Info.vol = true;
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  case Intrinsic::arm_ldrexd: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = 8;
    Info.vol = true;
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  default:
    break;
  }

  return false;
}